Merge branch 'soon' into staging

pull/1/head
ansuz 5 years ago
commit eddbe80eef

@ -20,22 +20,36 @@ var getFileDescriptorLimit = function (env, server, cb) {
}; };
var getCacheStats = function (env, server, cb) { var getCacheStats = function (env, server, cb) {
var metaSize = 0;
var channelSize = 0;
var metaCount = 0; var metaCount = 0;
var channelCount = 0; var channelCount = 0;
try {
var meta = env.metadata_cache; var meta = env.metadata_cache;
for (var x in meta) { for (var x in meta) {
if (meta.hasOwnProperty(x)) { metaCount++; } if (meta.hasOwnProperty(x)) {
metaCount++;
metaSize += JSON.stringify(meta[x]).length;
}
} }
var channels = env.channel_cache; var channels = env.channel_cache;
for (var y in channels) { for (var y in channels) {
if (channels.hasOwnProperty(y)) { channelCount++; } if (channels.hasOwnProperty(y)) {
channelCount++;
channelSize += JSON.stringify(channels[y]).length;
}
}
} catch (err) {
return void cb(err && err.message);
} }
cb(void 0, { cb(void 0, {
metadata: metaCount, metadata: metaCount,
metaSize: metaSize,
channel: channelCount, channel: channelCount,
channelSize: channelSize,
}); });
}; };

@ -36,7 +36,10 @@ Quota.applyCustomLimits = function (Env) {
Quota.updateCachedLimits = function (Env, cb) { Quota.updateCachedLimits = function (Env, cb) {
Quota.applyCustomLimits(Env); Quota.applyCustomLimits(Env);
if (Env.allowSubscriptions === false || Env.blockDailyCheck === true) { return void cb(); } if (Env.blockDailyCheck === true ||
(typeof(Env.blockDailyCheck) === 'undefined' && Env.adminEmail === false && Env.allowSubscriptions === false)) {
return void cb();
}
var body = JSON.stringify({ var body = JSON.stringify({
domain: Env.myDomain, domain: Env.myDomain,

@ -235,6 +235,54 @@ const getIndex = (Env, channelName, cb) => {
}); });
}; };
/* checkOffsetMap(map)
   Counts the entries of `map` without allocating a temporary array
   (a lower-memory alternative to `Object.keys(map).length`).
   Returns the count negated if the values are not in ascending order:
   the trimming routine deletes members in iteration order (oldest
   first), so ordering matters. --ansuz
*/
var checkOffsetMap = function (map) {
    var previous = 0;
    var total = 0;
    var ordered = true;
    for (let key in map) {
        total++;
        var value = map[key];
        if (ordered && previous > value) { ordered = false; }
        previous = value;
    }
    return ordered ? total : -total;
};
/* trimOffsetByOrder(map, n, keep)
 * Delete the oldest entries of `map` (in iteration order) so that at
 * most `keep` remain. `n` is the number of entries the map currently
 * contains (pass the value returned by checkOffsetMap).
 * `keep` is optional and defaults to 50, preserving the original
 * hard-coded behaviour for existing callers.
 * Relies on objects iterating keys in insertion order, so earlier
 * keys are the older offsets.
 */
var trimOffsetByOrder = function (map, n, keep) {
    if (typeof(keep) !== 'number') { keep = 50; }
    var toRemove = Math.max(n - keep, 0);
    var i = 0;
    for (let k in map) {
        if (i >= toRemove) { return; }
        i++;
        delete map[k];
    }
};
/* trimMapByOffset(map, offset)
 * Drop every entry of `map` whose byte offset is strictly below
 * `offset` — typically the offset of the oldest checkpoint you
 * still want to preserve. A falsy offset means "trim nothing". */
var trimMapByOffset = function (map, offset) {
    if (!offset) { return; }
    for (let key in map) {
        if (map[key] < offset) { delete map[key]; }
    }
};
/* storeMessage /* storeMessage
* channel id * channel id
* the message to store * the message to store
@ -286,17 +334,28 @@ const storeMessage = function (Env, channel, msg, isCp, optionalMessageHash) {
if (typeof (index.line) === "number") { index.line++; } if (typeof (index.line) === "number") { index.line++; }
if (isCp) { if (isCp) {
index.cpIndex = sliceCpIndex(index.cpIndex, index.line || 0); index.cpIndex = sliceCpIndex(index.cpIndex, index.line || 0);
for (let k in index.offsetByHash) { trimMapByOffset(index.offsetByHash, index.cpIndex[0]);
if (index.offsetByHash[k] < index.cpIndex[0]) {
delete index.offsetByHash[k];
}
}
index.cpIndex.push({ index.cpIndex.push({
offset: index.size, offset: index.size,
line: ((index.line || 0) + 1) line: ((index.line || 0) + 1)
}); });
} }
if (optionalMessageHash) { index.offsetByHash[optionalMessageHash] = index.size; } if (optionalMessageHash) {
index.offsetByHash[optionalMessageHash] = index.size;
index.offsets++;
}
if (index.offsets >= 100 && !index.cpIndex.length) {
let offsetCount = checkOffsetMap(index.offsetByHash);
if (offsetCount < 0) {
Log.warn('OFFSET_TRIM_OOO', {
channel: id,
map: index.OffsetByHash
});
} else if (offsetCount > 0) {
trimOffsetByOrder(index.offsetByHash, index.offsets);
index.offsets = checkOffsetMap(index.offsetByHash);
}
}
index.size += msgBin.length; index.size += msgBin.length;
// handle the next element in the queue // handle the next element in the queue

@ -118,6 +118,7 @@ const computeIndex = function (data, cb) {
const CB = Util.once(cb); const CB = Util.once(cb);
const offsetByHash = {}; const offsetByHash = {};
let offsetCount = 0;
let size = 0; let size = 0;
nThen(function (w) { nThen(function (w) {
// iterate over all messages in the channel log // iterate over all messages in the channel log
@ -151,6 +152,8 @@ const computeIndex = function (data, cb) {
// so clear the buffer every time you see a new one // so clear the buffer every time you see a new one
messageBuf = []; messageBuf = [];
} }
} else if (messageBuf.length > 100 && cpIndex.length === 0) {
messageBuf = messageBuf.slice(0, 50);
} }
// if it's not metadata or a checkpoint then it should be a regular message // if it's not metadata or a checkpoint then it should be a regular message
// store it in the buffer // store it in the buffer
@ -163,6 +166,7 @@ const computeIndex = function (data, cb) {
} }
// once indexing is complete you should have a buffer of messages since the latest checkpoint // once indexing is complete you should have a buffer of messages since the latest checkpoint
// or the 50-100 latest messages if the channel is of a type without checkpoints.
// map the 'hash' of each message to its byte offset in the log, to be used for reconnecting clients // map the 'hash' of each message to its byte offset in the log, to be used for reconnecting clients
messageBuf.forEach((msgObj) => { messageBuf.forEach((msgObj) => {
const msg = HK.tryParse(Env, msgObj.buff.toString('utf8')); const msg = HK.tryParse(Env, msgObj.buff.toString('utf8'));
@ -171,6 +175,7 @@ const computeIndex = function (data, cb) {
// msgObj.offset is API guaranteed by our storage module // msgObj.offset is API guaranteed by our storage module
// it should always be a valid positive integer // it should always be a valid positive integer
offsetByHash[HK.getHash(msg[4])] = msgObj.offset; offsetByHash[HK.getHash(msg[4])] = msgObj.offset;
offsetCount++;
} }
// There is a trailing \n at the end of the file // There is a trailing \n at the end of the file
size = msgObj.offset + msgObj.buff.length + 1; size = msgObj.offset + msgObj.buff.length + 1;
@ -182,6 +187,7 @@ const computeIndex = function (data, cb) {
// Only keep the checkpoints included in the last 100 messages // Only keep the checkpoints included in the last 100 messages
cpIndex: HK.sliceCpIndex(cpIndex, i), cpIndex: HK.sliceCpIndex(cpIndex, i),
offsetByHash: offsetByHash, offsetByHash: offsetByHash,
offsets: offsetCount,
size: size, size: size,
//metadata: metadata, //metadata: metadata,
line: i line: i

@ -0,0 +1,188 @@
/* globals process */
var Client = require("../../lib/client/");
var Crypto = require("../../www/bower_components/chainpad-crypto");
var Mailbox = Crypto.Mailbox;
var Nacl = require("tweetnacl/nacl-fast");
var nThen = require("nthen");
var Pinpad = require("../../www/common/pinpad");
var Rpc = require("../../www/common/rpc");
var Hash = require("../../www/common/common-hash");
var CpNetflux = require("../../www/bower_components/chainpad-netflux");
var Util = require("../../lib/common-util");
/* createMailbox(config, cb)
 * Open (and thereby create) a mailbox channel over chainpad-netflux.
 * Calls back with the webchannel once the channel reports ready.
 * Expects config: { network, channel, crypto, edPublic }. */
var createMailbox = function (config, cb) {
    var wc;
    CpNetflux.start({
        network: config.network,
        channel: config.channel,
        crypto: config.crypto,
        owners: [config.edPublic],
        noChainPad: true,
        // remember the webchannel so it can be handed back on ready
        onConnect: function (_wc /*, sendMessage */) { wc = _wc; },
        // incoming messages are ignored by this test harness
        onMessage: function (/* msg, user, vKey, isCp, hash, author */) {},
        onReady: function () { cb(void 0, wc); },
    });
};
// Surface unhandled promise rejections instead of letting them vanish.
process.on('unhandledRejection', (err) => { console.error(err); });
// Generate a fresh curve25519 (box) keypair, base64-encoded for transport.
var makeCurveKeys = function () {
    var keys = Nacl.box.keyPair();
    return {
        curvePrivate: Nacl.util.encodeBase64(keys.secretKey),
        curvePublic: Nacl.util.encodeBase64(keys.publicKey),
    };
};
// Generate a fresh ed25519 signing keypair from a random seed,
// base64-encoded for transport.
var makeEdKeys = function () {
    var seed = Nacl.randomBytes(Nacl.sign.seedLength);
    var pair = Nacl.sign.keyPair.fromSeed(seed);
    return {
        edPrivate: Nacl.util.encodeBase64(pair.secretKey),
        edPublic: Nacl.util.encodeBase64(pair.publicKey),
    };
};
/* createUser(config, cb)
 * Bootstrap one simulated client for this stress test:
 *   1. create a netflux client,
 *   2. generate ed/curve keys, an encryptor, and a mailbox channel id,
 *      then connect an anonymous rpc and an authenticated (pinpad) rpc,
 *   3. create and subscribe to the mailbox channel,
 *   4. attach a cleanup function and call back with the user object.
 * Calls cb(err) on client-creation failure; other failures abort the
 * sequence and log instead of calling back. */
var createUser = function (config, cb) {
    // config should contain keys for a team rpc (ed)
    // teamEdKeys
    // rosterHash
    var user;
    nThen(function (w) {
        Client.create(w(function (err, client) {
            if (err) {
                w.abort();
                return void cb(err);
            }
            user = client;
            // destroy is a one-shot event; handlers registered below
            // tear down the network, rpcs, etc. when it fires
            user.destroy = Util.mkEvent(true);
            user.destroy.reg(function () {
                user.network.disconnect();
            });
        }));
    }).nThen(function (w) {
        // make all the parameters you'll need
        var network = user.network = user.config.network;
        user.edKeys = makeEdKeys();
        user.curveKeys = makeCurveKeys();
        user.mailbox = Mailbox.createEncryptor(user.curveKeys);
        user.mailboxChannel = Hash.createChannelId();
        // create an anon rpc for alice
        Rpc.createAnonymous(network, w(function (err, rpc) {
            if (err) {
                w.abort();
                // NOTE(review): user.shutdown is not defined in this file;
                // presumably attached by Client.create — confirm, otherwise
                // this line throws on the error path
                user.shutdown();
                return void console.error('ANON_RPC_CONNECT_ERR');
            }
            user.anonRpc = rpc;
            user.destroy.reg(function () {
                user.anonRpc.destroy();
            });
        }));
        // authenticated rpc, signed with the user's ed keys
        Pinpad.create(network, user.edKeys, w(function (err, rpc) {
            if (err) {
                w.abort();
                // NOTE(review): same undefined-looking shutdown as above
                user.shutdown();
                console.error(err);
                return console.log('RPC_CONNECT_ERR');
            }
            user.rpc = rpc;
            user.destroy.reg(function () {
                user.rpc.destroy();
            });
        }));
    }).nThen(function (w) {
        // create and subscribe to your mailbox
        createMailbox({
            network: user.network,
            channel: user.mailboxChannel,
            crypto: user.mailbox,
            edPublic: user.edKeys.edPublic,
        }, w(function (err /*, wc*/) {
            if (err) {
                w.abort();
                console.error("Mailbox creation error");
                // hard exit: the rest of the script is useless without a mailbox
                process.exit(1);
            }
            //wc.leave();
        }));
    }).nThen(function () {
        // cleanup fires the destroy event; note the optional cb argument
        // is never invoked (the self-assignment below is a no-op,
        // presumably to appease an unused-parameter lint)
        user.cleanup = function (cb) {
            //console.log("Destroying user");
            // TODO remove your mailbox
            user.destroy.fire();
            cb = cb;
        };
        cb(void 0, user);
    });
};
var alice;
// shared team material; the same config could drive multiple users
// (see the commented-out 'bob' below)
var sharedConfig = {
    teamEdKeys: makeEdKeys(),
    teamCurveKeys: makeCurveKeys(),
    rosterSeed: Crypto.Team.createSeed(),
};
// Main flow: create alice, then write 300 messages to her own mailbox
// (one every 250ms), then clean up.
nThen(function (w) {
    createUser(sharedConfig, w(function (err, _alice) {
        if (err) {
            w.abort();
            return void console.log(err);
        }
        alice = _alice;
        alice.name = 'alice';
    }));
    /*
    createUser(sharedConfig, w(function (err, _bob) {
        if (err) {
            w.abort();
            return void console.log(err);
        }
        bob = _bob;
        bob.name = 'bob';
    }));*/
}).nThen(function (w) {
    var i = 0;
    var next = w();
    // send one encrypted message, then schedule the next until 300 sent;
    // each write goes through the anonymous rpc's WRITE_PRIVATE_MESSAGE
    var send = function () {
        if (i++ >= 300) { return next(); }
        var msg = alice.mailbox.encrypt(JSON.stringify({
            pewpew: 'bangbang',
        }), alice.curveKeys.curvePublic);
        alice.anonRpc.send('WRITE_PRIVATE_MESSAGE', [
            alice.mailboxChannel,
            msg
            //Nacl.util.encodeBase64(Nacl.randomBytes(128))
        ], w(function (err) {
            // any write failure aborts the whole script
            if (err) { throw new Error(err); }
            console.log('message %s written successfully', i);
            setTimeout(send, 250);
        }));
    };
    send();
}).nThen(function () {
    alice.cleanup();
    //bob.cleanup();
});

@ -601,6 +601,8 @@ define([
restoreMediaTags(userDocStateDom, mediaTagMap); restoreMediaTags(userDocStateDom, mediaTagMap);
cursors.removeCursors(inner);
// Deal with adjasent text nodes // Deal with adjasent text nodes
userDocStateDom.normalize(); userDocStateDom.normalize();
inner.normalize(); inner.normalize();
@ -809,8 +811,8 @@ define([
}); });
/* Display the cursor of other users and send our cursor */ /* Display the cursor of other users and send our cursor */
//framework.setCursorGetter(cursors.cursorGetter); framework.setCursorGetter(cursors.cursorGetter);
//framework.onCursorUpdate(cursors.onCursorUpdate); framework.onCursorUpdate(cursors.onCursorUpdate);
inner.addEventListener('click', updateCursor); inner.addEventListener('click', updateCursor);
inner.addEventListener('keyup', updateCursor); inner.addEventListener('keyup', updateCursor);

Loading…
Cancel
Save