Merge branch 'order-n-benchmark' into devel
n1mmy committed Nov 7, 2013
2 parents 5f23b5e + c1d2c31 commit 5d64081
Showing 8 changed files with 119 additions and 80 deletions.
2 changes: 1 addition & 1 deletion examples/unfinished/benchmark/.meteor/release
@@ -1 +1 @@
0.6.0
0.6.5.1
149 changes: 105 additions & 44 deletions examples/unfinished/benchmark/benchmark.js
@@ -1,7 +1,6 @@
// Pick which scenario we run. Pass the 'SCENARIO' environment variable
// to change this. See 'benchmark-scenarios.js' for the list of
// scenarios.

// Pick scenario from settings.
// XXX settings now has public. could move stuff there and avoid this.
var PARAMS = {};
if (Meteor.isServer) {
if (!Meteor.settings.params)
@@ -11,6 +10,12 @@ if (Meteor.isServer) {
PARAMS = __meteor_runtime_config__.PARAMS;
}


// id for this client or server.
var processId = Random.id();
console.log("processId", processId);


//////////////////////////////
// Helper Functions
//////////////////////////////
@@ -36,9 +41,7 @@ var pickCollection = function () {

var generateDoc = function () {
var ret = {};
ret.bucket = random(PARAMS.numBuckets);
// XXX trusting client clock is wrong!!
ret.when = +(new Date);
ret.fromProcess = processId;
_.times(PARAMS.documentNumFields, function (n) {
ret['Field' + n] = randomString(PARAMS.documentSize/PARAMS.documentNumFields);
});
@@ -59,18 +62,36 @@ _.times(PARAMS.numCollections, function (n) {


if (Meteor.isServer) {

// Make sure we have indexes. Helps mongo CPU usage.
Meteor.startup(function () {
// clear all the collections.
_.each(Collections, function (C) {
C.remove({});
C._ensureIndex({toProcess: 1});
C._ensureIndex({fromProcess: 1});
C._ensureIndex({when: 1});
});
});

// insert initial docs
_.times(PARAMS.initialDocuments, function () {
pickCollection().insert(generateDoc());
// periodic db check. generate a client list.
var currentClients = [];
var totalDocs = 0;
Meteor.setInterval(function () {
var newClients = {};
var newTotal = 0;
// XXX hardcoded time
var since = +(new Date) - 1000*PARAMS.insertsPerSecond * 5;
_.each(Collections, function (C) {
_.each(C.find({when: {$gt: since}}, {fields: {fromProcess: 1, when: 1}}).fetch(), function (d) {
newTotal += 1;
if (d.fromProcess && d.when > since)
newClients[d.fromProcess] = true;
});
});
currentClients = _.keys(newClients);
totalDocs = newTotal;
}, 3*1000); // XXX hardcoded time

// periodic document cleanup.
if (PARAMS.maxAgeSeconds) {
Meteor.setInterval(function () {
var when = +(new Date) - PARAMS.maxAgeSeconds*1000;
@@ -81,19 +102,60 @@ if (Meteor.isServer) {
}, 1000*PARAMS.maxAgeSeconds / 20);
}

Meteor.publish("data", function (collection, bucket) {
Meteor.publish("data", function (collection, process) {
check(collection, Number);
check(process, String);
var C = Collections[collection];
return C.find({bucket: bucket});
return C.find({toProcess: process});
});

Meteor.methods({
'insert': function (doc) {
check(doc, Object);
check(doc.fromProcess, String);
// pick a random destination. send to ourselves if there is no one
// else. by having an entry in the db, we'll end up in the target
// list.
doc.toProcess = Random.choice(currentClients) || doc.fromProcess;

doc.when = +(new Date);

var C = pickCollection();
C.insert(doc);
},
update: function (processId, field, value) {
check([processId, field, value], [String]);
var modifer = {};
modifer[field] = value; // XXX injection attack?

var C = pickCollection();
// update one message.
C.update({fromProcess: processId}, {$set: modifer}, {multi: false});
},
remove: function (processId) {
check(processId, String);
var C = pickCollection();
// remove one message.
var obj = C.findOne({fromProcess: processId});
if (obj)
C.remove(obj._id);
}
});


// XXX publish stats
// - currentClients.length
// - serverId
// - num ddp sessions
// - total documents
}



if (Meteor.isClient) {
// sub to data
_.times(PARAMS.numCollections, function (n) {
Meteor.subscribe("data", n, random(PARAMS.numBuckets));
Meteor.subscribe("data", n, processId);
});

// templates
@@ -108,60 +170,59 @@ if (Meteor.isClient) {
};

Template.status.updateRate = function () {
return Session.get('updateRate') + ", " + Session.get('updateAvg');
return (Session.get('updateAvgs') || []).join(", ");
};

// XXX count of how many docs are in local collection?


// do stuff periodically

if (PARAMS.insertsPerSecond) {
Meteor.setInterval(function () {
pickCollection().insert(generateDoc());
Meteor.call('insert', generateDoc());
}, 1000 / PARAMS.insertsPerSecond);
}

if (PARAMS.removesPerSecond) {
if (PARAMS.updatesPerSecond) {
Meteor.setInterval(function () {
var C = pickCollection();
var docs = C.find({}).fetch();
var doc = Random.choice(docs);
if (doc)
C.remove(doc._id);
}, 1000 / PARAMS.removesPerSecond);
Meteor.call('update',
processId,
'Field' + random(PARAMS.documentNumFields),
randomString(PARAMS.documentSize/PARAMS.documentNumFields)
);
}, 1000 / PARAMS.updatesPerSecond);
}

if (PARAMS.updatesPerSecond) {
if (PARAMS.removesPerSecond) {
Meteor.setInterval(function () {
var C = pickCollection();
var docs = C.find({}).fetch();
var doc = Random.choice(docs);
if (doc) {
var field = 'Field' + random(PARAMS.documentNumFields);
var modifer = {};
modifer[field] =
randomString(PARAMS.documentSize/PARAMS.documentNumFields);
C.update(doc._id, {$set: modifer});
}
}, 1000 / PARAMS.updatesPerSecond);
Meteor.call('remove', processId);
}, 1000 / PARAMS.removesPerSecond);
}



// XXX very rough per client update rate. we need to measure this
// better. ideally, on the server we could get the global update rate
var updateCount = 0;
var updateHistory = [];
var updateHistories = {1: [], 10: [], 100: [], 1000: []};
var updateFunc = function () { updateCount += 1; };
_.each(Collections, function (C) {
C.find({}).observe({
C.find({}).observeChanges({
added: updateFunc, changed: updateFunc, removed: updateFunc
});
});
Meteor.setInterval(function () {
updateHistory.push(updateCount);
if (updateHistory.length > 10)
updateHistory.shift();
Session.set('updateRate', updateCount);
Session.set('updateAvg', _.reduce(updateHistory, function(memo, num){
return memo + num; }, 0) / updateHistory.length);;
_.each(updateHistories, function (h, max) {
h.push(updateCount);
if (h.length > max)
h.shift();
});
Session.set('updateAvgs', _.map(updateHistories, function (h) {
return _.reduce(h, function(memo, num) {
return memo + num;
}, 0) / h.length;
  }));
updateCount = 0;
}, 1000);

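The net effect of the benchmark.js changes above: instead of placing every document in a random 'bucket' that all subscribed clients fan in on, each document is stamped with the inserting client's processId, the server's insert method addresses it to one currently-active process, and each client subscribes only to documents addressed to it. A minimal sketch of that round trip, condensed from the diff (it uses a single hard-coded collection and sends to itself rather than to a randomly chosen active client, so it is an illustration, not the committed file):

    // Sketch only -- condensed from the diff above.
    var Docs = new Meteor.Collection("Docs0");
    var processId = Random.id();          // identity of this client or server

    if (Meteor.isServer) {
      // Publish only documents addressed to the asking process.
      Meteor.publish("data", function (process) {
        check(process, String);
        return Docs.find({toProcess: process});
      });

      Meteor.methods({
        insert: function (doc) {
          check(doc, Object);
          check(doc.fromProcess, String);
          // The real code picks a random currently-active client;
          // this sketch just addresses the document back to the sender.
          doc.toProcess = doc.fromProcess;
          doc.when = +(new Date);
          Docs.insert(doc);
        }
      });
    }

    if (Meteor.isClient) {
      Meteor.subscribe("data", processId);   // only my documents arrive
      Meteor.call('insert', {fromProcess: processId, Field0: "payload"});
    }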
10 changes: 0 additions & 10 deletions examples/unfinished/benchmark/scenarios/README.md
@@ -1,15 +1,7 @@
Parameters for simulation:

Each document is randomly placed in a collection, with a random
'bucket' field. Clients sub to 1 bucket in each collection.

- numCollections
how many collections to spread the documents over
- numBuckets
number of buckets per collection.

- initialDocuments: Initial documents added by the server. Probably
not usefully combined with maxAgeSeconds

- maxAgeSeconds: How long to leave documents in the database. This,
combined with all the various rates, determines the steady state
@@ -23,5 +15,3 @@ Per-client action rates:
- documentSize: bytes of randomness per document.
// XXX make this a random distribution?
- documentNumFields: how many fields of randomness per document.

XXX also max documents? (count and remove N)
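For a rough sense of scale (simple arithmetic, not something measured in this commit): the steady-state document count per client is about insertsPerSecond × maxAgeSeconds, so the fast scenario below (insertsPerSecond = 5, maxAgeSeconds = 60) holds on the order of 5 × 60 = 300 live documents per client, less whatever removesPerSecond deletes before they age out.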
9 changes: 0 additions & 9 deletions examples/unfinished/benchmark/scenarios/bigdata-static.json

This file was deleted.

10 changes: 0 additions & 10 deletions examples/unfinished/benchmark/scenarios/bigdata-updates.json

This file was deleted.

4 changes: 1 addition & 3 deletions examples/unfinished/benchmark/scenarios/default.json
@@ -1,8 +1,6 @@
{
"params": {
"numCollections": 1,
"numBuckets": 3,
"initialDocuments": 1,
"maxAgeSeconds": 60,
"insertsPerSecond": 1,
"updatesPerSecond": 1,
11 changes: 11 additions & 0 deletions examples/unfinished/benchmark/scenarios/fast.json
@@ -0,0 +1,11 @@
{
"params": {
"numCollections": 1,
"maxAgeSeconds": 60,
"insertsPerSecond": 5,
"updatesPerSecond": 5,
"removesPerSecond": 1,
"documentSize": 128,
"documentNumFields": 2
}
}
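Presumably a scenario like this one is selected by passing the file as the app's Meteor settings (for example, an invocation along the lines of meteor --settings scenarios/fast.json — the exact command is an assumption, not shown in this commit); benchmark.js then reads the values from Meteor.settings.params on the server and mirrors them to clients via __meteor_runtime_config__.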
4 changes: 1 addition & 3 deletions examples/unfinished/benchmark/scenarios/nodata.json
@@ -1,7 +1,5 @@
{
"params": {
"numCollections": 1,
"numBuckets": 1,
"initialDocuments": 0
"numCollections": 1
}
}
