I have a load test for a WCF service, where we are trying out different compression libraries and configurations, and we need to measure the total number of MB sent during a test. Is there a performance counter that measures the relative traffic per test? If so, how do I add it to my load test? It seems that only a fraction of the performance counters are visible: for example, under the category "Web Service" I don't see the performance counter "Total Bytes Received" in the Visual Studio load test, but I can find it in PerfMon.
Thanks
In the load test, expand Counter Sets > expand the applicable set > right-click on the counter set > Add Counters.
You can implement your own custom performance counter like so:
using System;
using System.Diagnostics;
using System.Net.NetworkInformation;
namespace PerfCounter
{
class PerfCounter
{
private const String categoryName = "Custom category";
private const String counterName = "Total bytes received";
private const String categoryHelp = "A category for custom performance counters";
private const String counterHelp = "Total bytes received on network interface";
private const String lanName = "Local Area Connection"; // change this to match your network connection
private const int sampleRateInMillis = 1000;
private const int numberofSamples = 200000;
private static NetworkInterface lan = null;
private static PerformanceCounter perfCounter;
private static long initialReceivedBytes;
static void Main(string[] args)
{
setupLAN();
setupCategory();
createCounters();
updatePerfCounters();
}
private static void setupCategory()
{
if (!PerformanceCounterCategory.Exists(categoryName))
{
CounterCreationDataCollection counterCreationDataCollection = new CounterCreationDataCollection();
CounterCreationData totalBytesReceived = new CounterCreationData();
totalBytesReceived.CounterType = PerformanceCounterType.NumberOfItems64;
totalBytesReceived.CounterName = counterName;
counterCreationDataCollection.Add(totalBytesReceived);
// the counter below is created without an instance name, so the category must be single-instance
PerformanceCounterCategory.Create(categoryName, categoryHelp, PerformanceCounterCategoryType.SingleInstance, counterCreationDataCollection);
}
else
Console.WriteLine("Category {0} exists", categoryName);
}
private static void createCounters()
{
perfCounter = new PerformanceCounter(categoryName, counterName, false);
perfCounter.RawValue = getTotalBytesReceived();
}
private static long getTotalBytesReceived()
{
return lan.GetIPv4Statistics().BytesReceived;
}
private static void setupLAN()
{
NetworkInterface[] interfaces = NetworkInterface.GetAllNetworkInterfaces();
foreach (NetworkInterface networkInterface in interfaces)
{
if (networkInterface.Name.Equals(lanName))
lan = networkInterface;
}
if (lan == null)
throw new InvalidOperationException("No network interface named '" + lanName + "' was found");
initialReceivedBytes = lan.GetIPv4Statistics().BytesReceived;
}
private static void updatePerfCounters()
{
for (int i = 0; i < numberofSamples; i++)
{
perfCounter.RawValue = getTotalBytesReceived();
Console.WriteLine("received: {0} bytes", perfCounter.RawValue - initialReceivedBytes);
System.Threading.Thread.Sleep(sampleRateInMillis);
}
}
}
}
I'm trying to use Robolectric 4.3.1 to do the most basic of Android actions: get the Context.
I get a non-null context by doing this (I tried many other approaches, but they all end up with context = null):
Context context = RuntimeEnvironment.systemContext;
I can pass the object into some methods but I can never use it.
If I try
File dir = context.getFilesDir();
I get
java.lang.RuntimeException: No data directory found for package android
What am I doing wrong?
Here is my code:
@RunWith(RobolectricTestRunner.class)
@Config(sdk = Build.VERSION_CODES.O)
public class BtScannerTests
{
private BluetoothAdapter bluetoothAdapter;
@Before
public void setUp() throws Exception
{
bluetoothAdapter = Shadow.newInstanceOf(BluetoothAdapter.class);
}
private static boolean done = false;
@Test
public void testBtScannerCycle() throws InterruptedException
{
IntermediaryCallback intermediaryCallback = new IntermediaryCallback()
{
@Override
public void onReceiveMdsIntermediary(MdsIntermediary mds, int connectionHandle)
{
}
@Override
public void onReceiveMetricIntermediaries(List<MetricIntermediary> metricList, MdsIntermediary mds, int connectionHandle)
{
}
};
StatusEventCallback statusEventCallback = new StatusEventCallback()
{
@Override
public void onStatusEvent(StatusEvent statusEvent, int connectionHandle, String btAddress)
{
System.out.println("Status event " + statusEvent.name());
if(statusEvent == StatusEvent.SCANNING_PAUSED)
{
done = true;
}
}
};
Context context = RuntimeEnvironment.systemContext;
File dir = context.getFilesDir(); // This is the code that fails; put here to test attempts
AndroidBtManager.setStatusEventCallback(statusEventCallback);
AndroidBtManager androidBtManager =
new AndroidBtManager(context, intermediaryCallback, false, false, true);
BtScanner btScanner = androidBtManager.getBtScanner();
while(!done)
{
Thread.sleep(1000);
}
}
}
Use this one.
Context context = ApplicationProvider.getApplicationContext();
instead of
Context context = RuntimeEnvironment.systemContext;
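For reference, here is a minimal self-contained sketch of a test that obtains a usable context this way. The class and method names are illustrative, not part of the original project, and it assumes Robolectric plus androidx.test:core are on the test classpath:
import static org.junit.Assert.assertNotNull;
import android.content.Context;
import androidx.test.core.app.ApplicationProvider;
import java.io.File;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.robolectric.RobolectricTestRunner;

@RunWith(RobolectricTestRunner.class)
public class ContextSmokeTest
{
    @Test
    public void filesDirIsAvailable()
    {
        // ApplicationProvider returns the application context that Robolectric
        // builds for the test package, so a data directory exists for it
        Context context = ApplicationProvider.getApplicationContext();
        File dir = context.getFilesDir();
        assertNotNull(dir);
    }
}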
I want to move from anonymous child-document relationships (_childDocuments_) to labelled ones.
During testing, a performance degradation was detected when indexing documents into Solr, using identical schemas and documents.
Solr (8.1.1) configuration (local, 1 node, default settings): solr -e cloud
Test: index 500 documents several times and calculate the average indexing time.
Labelled relationship example:
{
"id": "parent_xxx",
"items": [{"id": "child_xxx"}]
}
Anonymous relationship example:
{
"id": "parent_xxx",
"_childDocuments_": [{"id": "child_xxx"}]
}
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.HttpSolrClient;
import org.apache.solr.common.SolrInputDocument;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
public class Scratch {
private static final int DOC_COUNT = 500;
private static final int ITERATION_COUNT = 5;
private static final boolean ANONYMOUS_CHILDREN = true;
private static final boolean LABELED_CHILDREN = false;
public static void main(String[] args) throws IOException, SolrServerException {
long anonymousTime = 0;
long labelledTime = 0;
for (int i = 0; i < ITERATION_COUNT; i++) {
List<SolrInputDocument> anonymousDocs = createSolrDocuments(ANONYMOUS_CHILDREN);
cleanSolrCollection();
anonymousTime += writeToSolr(anonymousDocs);
List<SolrInputDocument> labeledDocs = createSolrDocuments(LABELED_CHILDREN);
cleanSolrCollection();
labelledTime += writeToSolr(labeledDocs);
}
System.out.println("Avg anonymous time: " + (anonymousTime / ITERATION_COUNT));
System.out.println("Avg labelled time: " + (labelledTime / ITERATION_COUNT));
}
private static List<SolrInputDocument> createSolrDocuments(boolean isAnonymous) {
List<SolrInputDocument> request = new ArrayList<>();
for (int i = 0; i < DOC_COUNT; i++) {
SolrInputDocument parent = new SolrInputDocument();
parent.setField("id", "parent_" + i);
SolrInputDocument child = new SolrInputDocument();
child.setField("id", "child_" + i);
if (isAnonymous) {
parent.addChildDocument(child);
} else {
parent.addField("items", child);
}
request.add(parent);
}
return request;
}
private static void cleanSolrCollection() throws IOException, SolrServerException {
try (SolrClient client = getSolrClient()) {
client.deleteByQuery("main", "*:*");
}
}
private static long writeToSolr(List<SolrInputDocument> documents) throws IOException, SolrServerException {
long startAt = System.currentTimeMillis();
try (SolrClient client = getSolrClient()) {
client.add("main", documents);
}
return System.currentTimeMillis() - startAt;
}
private static SolrClient getSolrClient() {
return new HttpSolrClient.Builder("http://localhost:8983/solr")
.allowCompression(true)
.build();
}
}
Results:
500 docs with anonymous relationship ~ 29ms
500 docs with labelled relationship ~ 981ms
Is this normal behavior for Solr when working with labelled relationships?
I have not been able to find any information about it.
A performance difference of 20-30x does look strange.
In a list of URLs
http://a.com/foo
http://b.com/bar
http://a.com/monkey
http://c.com/prune
http://a.com/bear
http://b.com/walrus
http://b.com/baz
http://b.com/plugh
I want to maximise the distance between any pair of a.com's, any pair of b.com's, etc. This needs to be cheap but does not have to be optimal. (I am using the list of URLs to download files from websites a.com, b.com, c.com, and do not wish to visit any particular site more frequently than necessary. In the example here, we would hit the b.com site 3 times in succession, which should be avoided.)
I would ideally like a Java library but would settle for pseudocode.
"Maximise sum of pairwise distances in array" seems to be a similar problem, but it didn't have a simple answer; I simply want something that's "good enough".
Since there were no answers, I wrote my own. It's very crude but it works. It reads a list of URLs, extracts the hosts, counts them, and then fills a pigeon-hole array with indexes proportional to the inverse frequency of the hosts.
package org.xmlcml.cmine.util;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import com.google.common.collect.HashMultiset;
import com.google.common.collect.Multiset;
public class URLShuffler {
public static final Logger LOG = Logger.getLogger(URLShuffler.class);
static {
LOG.setLevel(Level.DEBUG);
}
// in case we need extra pigeonholes, though it doesn't seem necessary for medium-sized problems
private static int TOL = 1;
private List<String> urls;
private Multiset<String> domains;
private Map<String, Integer> currentIndexByDomain;
private Map<String, Integer> countByDomain;
private List<String> outputUrls;
public URLShuffler() {
}
public void readURLs(List<String> urls) {
this.urls= urls;
domains = HashMultiset.create();
for (String url : urls) {
String domain = getDomain(url);
domains.add(domain);
}
LOG.debug(domains);
}
// this would be better using java.net.URL
private String getDomain(String url) {
int idx = url.indexOf("//");
if (idx != -1) {
url = url.substring(idx+2);
}
idx = url.indexOf("/");
String domain = url.substring(0, idx);
return domain;
}
public List<String> getShuffledUrls() {
currentIndexByDomain = new HashMap<String, Integer>();
countByDomain = new HashMap<String, Integer>();
outputUrls = new ArrayList<String>();
for (int i = 0; i < urls.size() * TOL; i++) {
outputUrls.add("");
}
// this is a convenience method wrapping Guava sort.
for (Multiset.Entry<String> entry : CMineUtil.getEntriesSortedByCount(domains)) {
LOG.debug(entry);
countByDomain.put(entry.getElement(), entry.getCount());
currentIndexByDomain.put(entry.getElement(), entry.getCount() - 1);
}
for (String url : urls) {
String domain = getDomain(url);
Integer currentIndex = currentIndexByDomain.get(domain);
Integer count = countByDomain.get(domain);
int slot = (urls.size() * currentIndex * TOL) / count;
currentIndexByDomain.put(domain, currentIndex - 1);
addUrl(url, slot);
}
return outputUrls;
}
private void addUrl(String url, int slot) {
boolean filled = fillLower(url, slot);
if (!filled) {
fillUpper(url, slot);
}
}
// if slot is not free run upwards till next free slot
private boolean fillUpper(String url, int slot) {
for (int i = slot; i < outputUrls.size(); i++) {
if (fill(url, i)) {
return true;
}
}
return false;
}
// if slot is not free run downwards till next free slot
private boolean fillLower(String url, int slot) {
for (int i = slot; i >= 0; i--) {
if (fill(url, i)) {
return true;
}
}
return false;
}
private boolean fill(String url, int slot) {
if (outputUrls.get(slot).equals("")) {
outputUrls.set(slot, url);
return true;
}
return false;
}
}
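For reference, a minimal usage sketch of the class above; the demo class name is illustrative and the URL list is just the example from the question:
import java.util.Arrays;
import java.util.List;

public class URLShufflerDemo {
    public static void main(String[] args) {
        List<String> urls = Arrays.asList(
                "http://a.com/foo",
                "http://b.com/bar",
                "http://a.com/monkey",
                "http://c.com/prune",
                "http://a.com/bear",
                "http://b.com/walrus",
                "http://b.com/baz",
                "http://b.com/plugh");
        URLShuffler shuffler = new URLShuffler();
        shuffler.readURLs(urls);                            // count URLs per host
        List<String> shuffled = shuffler.getShuffledUrls(); // spread same-host URLs apart
        for (String url : shuffled) {
            System.out.println(url);
        }
    }
}
With TOL = 1 the output list has exactly one pigeonhole per URL, so every slot ends up filled; raising TOL adds spare pigeonholes that remain as empty strings.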
So I have this base class:
public abstract class WiresharkFile : IDisposable
{
private string _fileName;
private int _packets;
private int _packetsSent;
private string _duration;
private double _speed;
private int _progress;
protected abstract WiresharkFilePacket ReadPacket();
public abstract IEnumerator<WiresharkFilePacket> GetEnumerator();
public abstract void Rewind();
public string FileName
{
get { return _fileName; }
set { _fileName = value; }
}
public int Packets
{
get { return _packets; }
set { _packets = value; }
}
public void Dispose()
{
// implemented inside sub class.
}
}
And a specific Wireshark format (libpcap):
public class Libpcap : WiresharkFile, IDisposable, IEnumerable<WiresharkFilePacket>
{
private BinaryReader binaryReader;
private Version version;
private uint snaplen;
private int thiszone;
private uint sigfigs;
private LibpcapLinkType linktype;
private long basePos;
private bool byteSwap;
private static uint MAGIC = 0xa1b2c3d4;
private static uint MAGIC_ENDIAN = 0xd4c3b2a1;
public Libpcap(string path)
: this(new FileStream(path, FileMode.Open, FileAccess.Read))
{
FileName = path;
}
private Libpcap(Stream fileStream)
{
...
}
public override void Rewind()
{
binaryReader = new BinaryReader(new FileStream(FileName, FileMode.Open, FileAccess.Read));
binaryReader.BaseStream.Position = basePos;
}
public void Dispose()
{
if (binaryReader != null)
binaryReader.Close();
}
I removed almost all the parts about how I read this file.
Adding files into my application
I have this list of objects:
public ObservableCollection<WiresharkFile> wiresharkFiles { get; set; }
This list is bound to my ListView.
When the user chooses files to add to my application:
string[] files = openFileDialog.FileNames;
I check these files via another class:
public class FileValidation
{
public static void DoWork(IEnumerable<string> files)
{
CancellationTokenSource tokenSource = new CancellationTokenSource();
CancellationToken token = tokenSource.Token;
Task task = Task.Factory.StartNew(() =>
{
try
{
Parallel.ForEach(files,
new ParallelOptions
{
MaxDegreeOfParallelism = 3
},
file =>
{
ProcessFile(file);
});
}
catch (Exception)
{ }
}, tokenSource.Token,
TaskCreationOptions.None,
TaskScheduler.Default).ContinueWith
(t =>
{
if (FinishValidationEventHandler != null)
FinishValidationEventHandler();
}
, TaskScheduler.FromCurrentSynchronizationContext()
);
}
private static void ProcessFile(string file)
{
ReadWiresharkFormat(file);
using (WiresharkFile wiresharkFile = new Libpcap(file))
{
WiresharkFileInfo.ReadInfo(wiresharkFile);
// Add file into my list.
}
}
private static WiresharkFileFormat ReadWiresharkFormat(string file)
{
using (BinaryReader binaryReader = new BinaryReader(File.Open(file, FileMode.Open, FileAccess.Read)))
{
// Open file and read first 4 bytes in order to verify file type.
}
}
private static void ReadInfo(WiresharkFile wiresharkFile)
{
foreach (WiresharkFilePacket packet in wiresharkFile)
{
// Collect file information (number of packets...)
}
}
}
OK, so up to here all is good.
Now, when I add many files, let's say around 1000, I can see that my memory usage grows by about 200 MB, but after clearing the list the memory usage does not change.
Any idea what could cause this?
I have a trending application. The app gets a list of trends from Twitter every hour and puts it in the datastore (without checking for duplicates based on name). Therefore, for a particular word such as "Thaipusam" I'll have many objects whose only difference is the date, which is calculated by long date = new Date().getTime(); whenever the background process pushes it to the datastore.
Each object is like this:
{
id: 4749890231992320,
name: "Thaipusam",
url: "http://twitter.com/search?q=Thaipusam",
query: "Thaipusam",
location: "Kuala Lumpur",
woeid: 1154781,
date: 1389865326440
}
Since I have over 5000 objects in the datastore, I need to know how many times each object was searched per day.
What is the best/most efficient practice to calculate/query the repetition per day?
I have no idea whether it's the most efficient way or not, but I did this in my servlet and the result is satisfactory:
private static final long MILLIS_PER_DAY = 24 * 60 * 60 * 1000;
private static final long MILLIS_PER_WEEK = 7 * MILLIS_PER_DAY;
private static final long MILLIS_PER_MONTH = 30 * MILLIS_PER_DAY;
private static final long MILLIS_PER_YEAR = 365 * MILLIS_PER_DAY;
private static final String QUERY_PER_PERIOD = "SELECT m.date FROM TwitterTrendsJPA As m WHERE m.name = :keyword1 AND m.date >= :keyword2";
In the doGet() method:
long currentTime = new Date().getTime();
long last24hours = currentTime - MILLIS_PER_DAY;
long last7days = currentTime - MILLIS_PER_WEEK;
long last30days = currentTime - MILLIS_PER_MONTH;
long last365days = currentTime - MILLIS_PER_YEAR;
#SuppressWarnings("unchecked")
List<Long> statList;
Statistic statistic = new Statistic();
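// em is the JPA EntityManager and p2 holds the trend name being queried (both are defined elsewhere in the servlet)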
Query query1 = em.createQuery(QUERY_PER_PERIOD);
query1.setParameter("keyword1", p2);
query1.setParameter("keyword2", last24hours);
statList = (List<Long>) query1.getResultList();
statistic.setPerDay(statList);
Query query2 = em.createQuery(QUERY_PER_PERIOD);
query2.setParameter("keyword1", p2);
query2.setParameter("keyword2", last7days);
statList = (List<Long>) query2.getResultList();
statistic.setPerWeek(statList);
Query query3 = em.createQuery(QUERY_PER_PERIOD);
query3.setParameter("keyword1", p2);
query3.setParameter("keyword2", last30days);
statList = (List<Long>) query3.getResultList();
statistic.setPerMonth(statList);
Query query4 = em.createQuery(QUERY_PER_PERIOD);
query4.setParameter("keyword1", p2);
query4.setParameter("keyword2", last365days);
statList = (List<Long>) query4.getResultList();
statistic.setPerYear(statList);
and the Statistic class is like this:
private final class Statistic {
private List<Long> perDay;
private List<Long> perWeek;
private List<Long> perMonth;
private List<Long> perYear;
public List<Long> getPerDay() {
return perDay;
}
public void setPerDay(List<Long> perDay) {
this.perDay = perDay;
}
public List<Long> getPerWeek() {
return perWeek;
}
public void setPerWeek(List<Long> perWeek) {
this.perWeek = perWeek;
}
public List<Long> getPerMonth() {
return perMonth;
}
public void setPerMonth(List<Long> perMonth) {
this.perMonth = perMonth;
}
public List<Long> getPerYear() {
return perYear;
}
public void setPerYear(List<Long> perYear) {
this.perYear = perYear;
}
}
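The repetition count per period is then just the size of each list. A minimal sketch, assuming a Statistic instance populated as in the doGet() excerpt above (the variable names are illustrative):
int hitsLastDay = statistic.getPerDay().size();     // repetitions in the last 24 hours
int hitsLastWeek = statistic.getPerWeek().size();   // repetitions in the last 7 days
int hitsLastMonth = statistic.getPerMonth().size(); // repetitions in the last 30 days
int hitsLastYear = statistic.getPerYear().size();   // repetitions in the last 365 days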