Merge branch 'staging' into communities-trim
commit c9676e1c91

@ -231,26 +231,17 @@ module.exports = {
*/

inactiveTime: 90, // days

/* CryptPad can be configured to remove inactive data which has not been pinned.
* Deletion of data is always risky and as an operator you have the choice to
* archive data instead of deleting it outright. Set this value to true if
* you want your server to archive files and false if you want to keep using
* the old behaviour of simply removing files.
/* CryptPad archives some data instead of deleting it outright.
* This archived data still takes up space and so you'll probably still want to
* remove these files after a brief period.
*
* cryptpad/scripts/evict-inactive.js is intended to be run daily
* from a crontab or similar scheduling service.
*
* WARNING: this is not implemented universally, so at the moment this will
* only apply to the removal of 'channels' due to inactivity.
*/
retainData: true,

/* As described above, CryptPad offers the ability to archive some data
* instead of deleting it outright. This archived data still takes up space
* and so you'll probably still want to remove these files after a brief period.
* The intent with this feature is to provide a safety net in case of accidental
* deletion. Set this value to the number of days you'd like to retain
* archived data before it's removed permanently.
*
* If 'retainData' is set to false, there will never be any archived data
* to remove.
*/
archiveRetentionTime: 15,
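
For illustration, the two retention settings above interact roughly as follows. This is a hedged sketch rather than code from the repository; the timestamp arguments are hypothetical inputs, and scripts/evict-inactive.js may implement the details differently.

// Hedged sketch (not from the diff): interpreting `inactiveTime` and
// `archiveRetentionTime`, both expressed in days.
var DAY = 24 * 60 * 60 * 1000;

// a channel becomes a candidate for archival once it has been idle longer than inactiveTime
var isInactive = function (config, lastModifiedMs, nowMs) {
    return (nowMs - lastModifiedMs) > config.inactiveTime * DAY;
};

// an archived file may be removed permanently once it is older than archiveRetentionTime
var archiveIsExpired = function (config, archivedAtMs, nowMs) {
    return (nowMs - archivedAtMs) > config.archiveRetentionTime * DAY;
};
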
@ -10,7 +10,7 @@
|
|||
@alertify-input-fg: @colortheme_modal-input-fg;
|
||||
|
||||
input:not(.form-control), textarea {
|
||||
background-color: @alertify-input-fg;
|
||||
// background-color: @alertify-input-fg;
|
||||
color: @cryptpad_text_col;
|
||||
border: 1px solid @alertify-input-bg;
|
||||
width: 100%;
|
||||
|
@ -23,6 +23,10 @@
|
|||
}
|
||||
}
|
||||
|
||||
input:not(.form-control) {
|
||||
height: @variables_input-height;
|
||||
}
|
||||
|
||||
div.cp-alertify-type {
|
||||
display: flex;
|
||||
input {
|
||||
|
|
|
@ -4,9 +4,9 @@
|
|||
--LessLoader_require: LessLoader_currentFile();
|
||||
};
|
||||
& {
|
||||
@corner-button-ok: #2c9b00;
|
||||
@corner-button-cancel: #990000;
|
||||
@corner-link: #ffff7a;
|
||||
@corner-blue: @colortheme_logo-1;
|
||||
@corner-white: @colortheme_base;
|
||||
|
||||
@keyframes appear {
|
||||
0% {
|
||||
|
@ -27,21 +27,23 @@
|
|||
|
||||
.cp-corner-container {
|
||||
position: absolute;
|
||||
right: 0;
|
||||
bottom: 0;
|
||||
width: 300px;
|
||||
height: 200px;
|
||||
border-top-left-radius: 200px;
|
||||
padding: 15px;
|
||||
text-align: right;
|
||||
background-color: @colortheme_logo-1;
|
||||
color: @colortheme_base;
|
||||
right: 10px;
|
||||
bottom: 10px;
|
||||
width: 350px;
|
||||
padding: 10px;
|
||||
background-color: fade(@corner-blue, 95%);
|
||||
border: 1px solid @corner-blue;
|
||||
color: @corner-white;
|
||||
z-index: 9999;
|
||||
transform-origin: bottom right;
|
||||
animation: appear 0.8s ease-in-out;
|
||||
box-shadow: 0 0 10px 0 @colortheme_logo-1;
|
||||
//transform: scale(0.1);
|
||||
//transform: scale(1);
|
||||
//box-shadow: 0 0 10px 0 @corner-blue;
|
||||
|
||||
&.cp-corner-alt {
|
||||
background-color: fade(@corner-white, 95%);
|
||||
border: 1px solid @corner-blue;
|
||||
color: @corner-blue;
|
||||
}
|
||||
|
||||
h1, h2, h3 {
|
||||
font-size: 1.5em;
|
||||
|
@ -64,7 +66,7 @@
|
|||
line-height: 15px;
|
||||
display: none;
|
||||
&:hover {
|
||||
color: darken(@colortheme_base, 15%);
|
||||
color: darken(@corner-white, 15%);
|
||||
}
|
||||
}
|
||||
.cp-corner-minimize {
|
||||
|
@ -86,46 +88,95 @@
|
|||
}
|
||||
}
|
||||
&.cp-corner-big {
|
||||
width: 400px;
|
||||
height: 250px;
|
||||
width: 500px;
|
||||
}
|
||||
|
||||
.cp-corner-dontshow {
|
||||
cursor: pointer;
|
||||
.fa {
|
||||
margin-right: 0.3em;
|
||||
font-size: 1.1em;
|
||||
}
|
||||
&:hover {
|
||||
color: darken(@corner-white, 10%);
|
||||
}
|
||||
}
|
||||
&.cp-corner-alt {
|
||||
.cp-corner-dontshow {
|
||||
&:hover {
|
||||
color: lighten(@corner-blue, 10%);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
.cp-corner-actions {
|
||||
min-height: 30px;
|
||||
margin: 15px auto;
|
||||
display: inline-block;
|
||||
margin: 10px auto;
|
||||
display: block;
|
||||
text-align: right;
|
||||
}
|
||||
.cp-corner-footer {
|
||||
font-style: italic;
|
||||
font-size: 0.8em;
|
||||
}
|
||||
.cp-corner-footer, .cp-corner-text {
|
||||
a {
|
||||
color: @corner-link;
|
||||
color: @corner-white;
|
||||
text-decoration: underline;
|
||||
&:hover {
|
||||
color: darken(@corner-link, 20%);
|
||||
color: darken(@corner-white, 10%);
|
||||
}
|
||||
}
|
||||
}
|
||||
&.cp-corner-alt a {
|
||||
color: @corner-blue;
|
||||
&:hover {
|
||||
color: lighten(@corner-blue, 10%);
|
||||
}
|
||||
}
|
||||
|
||||
button {
|
||||
border: 0px;
|
||||
padding: 5px;
|
||||
color: @colortheme_base;
|
||||
margin-left: 5px;
|
||||
color: @corner-white;
|
||||
&:not(:first-child) {
|
||||
margin-left: 10px;
|
||||
}
|
||||
outline: none;
|
||||
text-transform: uppercase;
|
||||
border: 1px solid @corner-white;
|
||||
.fa, .cptools {
|
||||
margin-right: 0.3em;
|
||||
}
|
||||
&.cp-corner-primary {
|
||||
background-color: @corner-button-ok;
|
||||
font-weight: bold;
|
||||
background-color: @corner-white;
|
||||
color: @corner-blue;
|
||||
&:hover {
|
||||
background-color: lighten(@corner-button-ok, 10%);
|
||||
background-color: lighten(@corner-blue, 50%);
|
||||
border-color: lighten(@corner-blue, 50%);
|
||||
}
|
||||
}
|
||||
&.cp-corner-cancel {
|
||||
background-color: @corner-button-cancel;
|
||||
margin-left: 10px;
|
||||
background-color: @corner-blue;
|
||||
color: @corner-white;
|
||||
&:hover {
|
||||
background-color: lighten(@corner-button-cancel, 10%);
|
||||
background-color: darken(@corner-blue, 10%);
|
||||
}
|
||||
}
|
||||
}
|
||||
&.cp-corner-alt button {
|
||||
border-color: @corner-blue;
|
||||
&.cp-corner-primary {
|
||||
background-color: @corner-blue;
|
||||
color: @corner-white;
|
||||
&:hover {
|
||||
background-color: darken(@corner-blue, 10%);
|
||||
border-color: darken(@corner-blue, 10%);
|
||||
}
|
||||
}
|
||||
&.cp-corner-cancel {
|
||||
background-color: @corner-white;
|
||||
color: @corner-blue;
|
||||
&:hover {
|
||||
background-color: lighten(@corner-blue, 50%);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -8,6 +8,7 @@
|
|||
@notif-height: 50px;
|
||||
.cp-notifications-container {
|
||||
max-width: 300px;
|
||||
width: 300px;
|
||||
display: flex;
|
||||
flex-flow: column;
|
||||
& hr {
|
||||
|
@ -16,6 +17,14 @@
|
|||
.cp-notification {
|
||||
min-height: @notif-height;
|
||||
display: flex;
|
||||
.cp-avatar {
|
||||
.avatar_main(30px);
|
||||
padding: 0 5px;
|
||||
cursor: pointer;
|
||||
&:hover {
|
||||
background-color: rgba(0,0,0,0.1);
|
||||
}
|
||||
}
|
||||
.cp-notification-content {
|
||||
flex: 1;
|
||||
align-items: stretch;
|
||||
|
|
|
@ -117,6 +117,7 @@
|
|||
//border-radius: 0 0.25em 0.25em 0;
|
||||
//border: 1px solid #adadad;
|
||||
border-left: 0px;
|
||||
height: @variables_input-height;
|
||||
}
|
||||
}
|
||||
&>div {
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
|
||||
// Elements size
|
||||
@variables_bar-height: 32px;
|
||||
@variables_input-height: 38px;
|
||||
|
||||
// Used in modal.less and alertify.less
|
||||
@variables_padding: 12px;
|
||||
|
|
|
@ -0,0 +1,75 @@
|
|||
/* jshint esversion: 6 */
|
||||
const nThen = require("nthen");
|
||||
const WebSocketServer = require('ws').Server;
|
||||
const NetfluxSrv = require('chainpad-server');
|
||||
|
||||
module.exports.create = function (config) {
|
||||
const wsConfig = {
|
||||
server: config.httpServer,
|
||||
};
|
||||
|
||||
nThen(function (w) {
|
||||
require('../storage/file').create(config, w(function (_store) {
|
||||
config.store = _store;
|
||||
}));
|
||||
}).nThen(function (w) {
|
||||
// XXX embed this in historyKeeper
|
||||
require("../storage/tasks").create(config, w(function (e, tasks) {
|
||||
if (e) {
|
||||
throw e;
|
||||
}
|
||||
config.tasks = tasks;
|
||||
if (config.disableIntegratedTasks) { return; }
|
||||
|
||||
config.intervals = config.intervals || {};
|
||||
config.intervals.taskExpiration = setInterval(function () {
|
||||
tasks.runAll(function (err) {
|
||||
if (err) {
|
||||
// either TASK_CONCURRENCY or an error with tasks.list
|
||||
// in either case it is already logged.
|
||||
}
|
||||
});
|
||||
}, 1000 * 60 * 5); // run every five minutes
|
||||
}));
|
||||
}).nThen(function () {
|
||||
// asynchronously create a historyKeeper and RPC together
|
||||
require('./historyKeeper.js').create(config, function (err, historyKeeper) {
|
||||
if (err) { throw err; }
|
||||
|
||||
var log = config.log;
|
||||
|
||||
// spawn ws server and attach netflux event handlers
|
||||
NetfluxSrv.create(new WebSocketServer(wsConfig))
|
||||
.on('channelClose', historyKeeper.channelClose)
|
||||
.on('channelMessage', historyKeeper.channelMessage)
|
||||
.on('channelOpen', historyKeeper.channelOpen)
|
||||
.on('sessionClose', function (userId, reason) {
|
||||
if (['BAD_MESSAGE', 'SOCKET_ERROR', 'SEND_MESSAGE_FAIL_2'].indexOf(reason) !== -1) {
|
||||
return void log.error('SESSION_CLOSE_WITH_ERROR', {
|
||||
userId: userId,
|
||||
reason: reason,
|
||||
});
|
||||
}
|
||||
log.verbose('SESSION_CLOSE_ROUTINE', {
|
||||
userId: userId,
|
||||
reason: reason,
|
||||
});
|
||||
})
|
||||
.on('error', function (error, label, info) {
|
||||
if (!error) { return; }
|
||||
/* labels:
|
||||
SEND_MESSAGE_FAIL, SEND_MESSAGE_FAIL_2, FAIL_TO_DISCONNECT,
|
||||
FAIL_TO_TERMINATE, HANDLE_CHANNEL_LEAVE, NETFLUX_BAD_MESSAGE,
|
||||
NETFLUX_WEBSOCKET_ERROR
|
||||
*/
|
||||
log.error(label, {
|
||||
code: error.code,
|
||||
message: error.message,
|
||||
stack: error.stack,
|
||||
info: info,
|
||||
});
|
||||
})
|
||||
.register(historyKeeper.id, historyKeeper.directMessage);
|
||||
});
|
||||
});
|
||||
};
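
For illustration, a minimal wiring sketch for this module. It is not part of the diff; the log stub and the note about other config fields are assumptions about what the storage and historyKeeper modules expect.

// Hedged sketch: attaching the websocket API to an existing HTTP server.
const Http = require('http');
const Api = require('./lib/api');

const httpServer = Http.createServer();
httpServer.listen(3000, '127.0.0.1', function () {
    Api.create({
        httpServer: httpServer,
        log: { error: console.error, verbose: console.log, silly: console.log },
        // storage paths, task settings, etc. would normally come from config.js
    });
});
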
@ -0,0 +1,119 @@
|
|||
/*jshint esversion: 6 */
|
||||
const BatchRead = require("../batch-read");
|
||||
const nThen = require("nthen");
|
||||
const getFolderSize = require("get-folder-size");
|
||||
var Fs = require("fs");
|
||||
|
||||
var Admin = module.exports;
|
||||
|
||||
var getActiveSessions = function (Env, Server, cb) {
|
||||
var stats = Server.getSessionStats();
|
||||
cb(void 0, [
|
||||
stats.total,
|
||||
stats.unique
|
||||
]);
|
||||
};
|
||||
|
||||
var shutdown = function (Env, Server, cb) {
|
||||
if (true) {
|
||||
return void cb('E_NOT_IMPLEMENTED');
|
||||
}
|
||||
|
||||
// disconnect all users and reject new connections
|
||||
Server.shutdown();
|
||||
|
||||
// stop all intervals that may be running
|
||||
Object.keys(Env.intervals).forEach(function (name) {
|
||||
clearInterval(Env.intervals[name]);
|
||||
});
|
||||
|
||||
// set a flag to prevent incoming database writes
|
||||
// wait until all pending writes are complete
|
||||
// then process.exit(0);
|
||||
// and allow system functionality to restart the server
|
||||
};
|
||||
|
||||
const batchRegisteredUsers = BatchRead("GET_REGISTERED_USERS");
|
||||
var getRegisteredUsers = function (Env, cb) {
|
||||
batchRegisteredUsers('', cb, function (done) {
|
||||
var dir = Env.paths.pin;
|
||||
var folders;
|
||||
var users = 0;
|
||||
nThen(function (waitFor) {
|
||||
Fs.readdir(dir, waitFor(function (err, list) {
|
||||
if (err) {
|
||||
waitFor.abort();
|
||||
return void done(err);
|
||||
}
|
||||
folders = list;
|
||||
}));
|
||||
}).nThen(function (waitFor) {
|
||||
folders.forEach(function (f) {
|
||||
var dir = Env.paths.pin + '/' + f;
|
||||
Fs.readdir(dir, waitFor(function (err, list) {
|
||||
if (err) { return; }
|
||||
users += list.length;
|
||||
}));
|
||||
});
|
||||
}).nThen(function () {
|
||||
done(void 0, users);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
const batchDiskUsage = BatchRead("GET_DISK_USAGE");
|
||||
var getDiskUsage = function (Env, cb) {
|
||||
batchDiskUsage('', cb, function (done) {
|
||||
var data = {};
|
||||
nThen(function (waitFor) {
|
||||
getFolderSize('./', waitFor(function(err, info) {
|
||||
data.total = info;
|
||||
}));
|
||||
getFolderSize(Env.paths.pin, waitFor(function(err, info) {
|
||||
data.pin = info;
|
||||
}));
|
||||
getFolderSize(Env.paths.blob, waitFor(function(err, info) {
|
||||
data.blob = info;
|
||||
}));
|
||||
getFolderSize(Env.paths.staging, waitFor(function(err, info) {
|
||||
data.blobstage = info;
|
||||
}));
|
||||
getFolderSize(Env.paths.block, waitFor(function(err, info) {
|
||||
data.block = info;
|
||||
}));
|
||||
getFolderSize(Env.paths.data, waitFor(function(err, info) {
|
||||
data.datastore = info;
|
||||
}));
|
||||
}).nThen(function () {
|
||||
done(void 0, data);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
Admin.command = function (Env, Server, publicKey, data, cb) {
|
||||
var admins = Env.admins;
|
||||
if (admins.indexOf(publicKey) === -1) {
|
||||
return void cb("FORBIDDEN");
|
||||
}
|
||||
|
||||
// Handle commands here
|
||||
switch (data[0]) {
|
||||
case 'ACTIVE_SESSIONS':
|
||||
return getActiveSessions(Env, Server, cb);
|
||||
case 'ACTIVE_PADS':
|
||||
return cb(void 0, Server.getActiveChannelCount());
|
||||
case 'REGISTERED_USERS':
|
||||
return getRegisteredUsers(Env, cb);
|
||||
case 'DISK_USAGE':
|
||||
return getDiskUsage(Env, cb);
|
||||
case 'FLUSH_CACHE':
|
||||
Env.flushCache();
|
||||
return cb(void 0, true);
|
||||
case 'SHUTDOWN':
|
||||
return shutdown(Env, Server, cb);
|
||||
default:
|
||||
return cb('UNHANDLED_ADMIN_COMMAND');
|
||||
}
|
||||
};
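
For illustration, a hedged sketch (not part of the diff) of invoking the admin RPC directly, e.g. from a test. It assumes `Env.admins` contains the caller's public key and that `Server` provides the methods used above.

Admin.command(Env, Server, adminPublicKey, ['ACTIVE_SESSIONS'], function (err, stats) {
    if (err) { return void console.error(err); } // e.g. "FORBIDDEN"
    // getActiveSessions() calls back with [total, unique]
    console.log('total sessions:', stats[0], 'unique users:', stats[1]);
});
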
@ -0,0 +1,172 @@
|
|||
/*jshint esversion: 6 */
|
||||
/* globals Buffer*/
|
||||
var Block = module.exports;
|
||||
|
||||
const Fs = require("fs");
|
||||
const Fse = require("fs-extra");
|
||||
const Path = require("path");
|
||||
const Nacl = require("tweetnacl/nacl-fast");
|
||||
const nThen = require("nthen");
|
||||
|
||||
const Util = require("../common-util");
|
||||
|
||||
/*
|
||||
We assume that the server is secured against MitM attacks
|
||||
via HTTPS, and that malicious actors do not have code execution
|
||||
capabilities. If they do, we have much more serious problems.
|
||||
|
||||
The capability to replay a block write or remove results in either
|
||||
a denial of service for the user whose block was removed, or in the
|
||||
case of a write, a rollback to an earlier password.
|
||||
|
||||
Since block modification is destructive, this can result in loss
|
||||
of access to the user's drive.
|
||||
|
||||
So long as the detached signature is never observed by a malicious
|
||||
party, and the server discards it after proof of knowledge, replays
|
||||
are not possible. However, this precludes verification of the signature
|
||||
at a later time.
|
||||
|
||||
Despite this, an integrity check is still possible by the original
|
||||
author of the block, since we assume that the block will have been
|
||||
encrypted with xsalsa20-poly1305 which is authenticated.
|
||||
*/
|
||||
Block.validateLoginBlock = function (Env, publicKey, signature, block, cb) { // FIXME BLOCKS
|
||||
// convert the public key to a Uint8Array and validate it
|
||||
if (typeof(publicKey) !== 'string') { return void cb('E_INVALID_KEY'); }
|
||||
|
||||
var u8_public_key;
|
||||
try {
|
||||
u8_public_key = Nacl.util.decodeBase64(publicKey);
|
||||
} catch (e) {
|
||||
return void cb('E_INVALID_KEY');
|
||||
}
|
||||
|
||||
var u8_signature;
|
||||
try {
|
||||
u8_signature = Nacl.util.decodeBase64(signature);
|
||||
} catch (e) {
|
||||
Env.Log.error('INVALID_BLOCK_SIGNATURE', e);
|
||||
return void cb('E_INVALID_SIGNATURE');
|
||||
}
|
||||
|
||||
// convert the block to a Uint8Array
|
||||
var u8_block;
|
||||
try {
|
||||
u8_block = Nacl.util.decodeBase64(block);
|
||||
} catch (e) {
|
||||
return void cb('E_INVALID_BLOCK');
|
||||
}
|
||||
|
||||
// take its hash
|
||||
var hash = Nacl.hash(u8_block);
|
||||
|
||||
// validate the signature against the hash of the content
|
||||
var verified = Nacl.sign.detached.verify(hash, u8_signature, u8_public_key);
|
||||
|
||||
// existing authentication ensures that users cannot replay old blocks
|
||||
|
||||
// call back with (err) if unsuccessful
|
||||
if (!verified) { return void cb("E_COULD_NOT_VERIFY"); }
|
||||
|
||||
return void cb(null, u8_block);
|
||||
};
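
The comment above describes verifying a detached signature over the hash of the block. For illustration, a hedged client-side sketch (not part of the diff) of how such a proof could be produced; `signKeys` is a hypothetical ed25519 keypair (e.g. from Nacl.sign.keyPair()) and `blockU8` the encrypted block bytes.

var makeBlockProof = function (signKeys, blockU8) {
    var hash = Nacl.hash(blockU8); // SHA-512, matching the verification above
    var sig = Nacl.sign.detached(hash, signKeys.secretKey); // detached signature over the hash
    return {
        publicKey: Nacl.util.encodeBase64(signKeys.publicKey),
        signature: Nacl.util.encodeBase64(sig),
        block: Nacl.util.encodeBase64(blockU8),
    };
};
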
var createLoginBlockPath = function (Env, publicKey) { // FIXME BLOCKS
|
||||
// prepare publicKey to be used as a file name
|
||||
var safeKey = Util.escapeKeyCharacters(publicKey);
|
||||
|
||||
// validate safeKey
|
||||
if (typeof(safeKey) !== 'string') {
|
||||
return;
|
||||
}
|
||||
|
||||
// derive the full path
|
||||
// /home/cryptpad/cryptpad/block/fg/fg32kefksjdgjkewrjksdfksjdfsdfskdjfsfd
|
||||
return Path.join(Env.paths.block, safeKey.slice(0, 2), safeKey);
|
||||
};
|
||||
|
||||
Block.writeLoginBlock = function (Env, msg, cb) { // FIXME BLOCKS
|
||||
//console.log(msg);
|
||||
var publicKey = msg[0];
|
||||
var signature = msg[1];
|
||||
var block = msg[2];
|
||||
|
||||
Block.validateLoginBlock(Env, publicKey, signature, block, function (e, validatedBlock) {
|
||||
if (e) { return void cb(e); }
|
||||
if (!(validatedBlock instanceof Uint8Array)) { return void cb('E_INVALID_BLOCK'); }
|
||||
|
||||
// derive the filepath
|
||||
var path = createLoginBlockPath(Env, publicKey);
|
||||
|
||||
// make sure the path is valid
|
||||
if (typeof(path) !== 'string') {
|
||||
return void cb('E_INVALID_BLOCK_PATH');
|
||||
}
|
||||
|
||||
var parsed = Path.parse(path);
|
||||
if (!parsed || typeof(parsed.dir) !== 'string') {
|
||||
return void cb("E_INVALID_BLOCK_PATH_2");
|
||||
}
|
||||
|
||||
nThen(function (w) {
|
||||
// make sure the path to the file exists
|
||||
Fse.mkdirp(parsed.dir, w(function (e) {
|
||||
if (e) {
|
||||
w.abort();
|
||||
cb(e);
|
||||
}
|
||||
}));
|
||||
}).nThen(function () {
|
||||
// actually write the block
|
||||
|
||||
// flow is dumb and I need to guard against this which will never happen
|
||||
/*:: if (typeof(validatedBlock) === 'undefined') { throw new Error('should never happen'); } */
|
||||
/*:: if (typeof(path) === 'undefined') { throw new Error('should never happen'); } */
|
||||
Fs.writeFile(path, Buffer.from(validatedBlock), { encoding: "binary", }, function (err) {
|
||||
if (err) { return void cb(err); }
|
||||
cb();
|
||||
});
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
/*
|
||||
When users write a block, they upload the block, and provide
|
||||
a signature proving that they deserve to be able to write to
|
||||
the location determined by the public key.
|
||||
|
||||
When removing a block, there is nothing to upload, but we need
|
||||
to sign something. Since the signature is considered sensitive
|
||||
information, we can just sign some constant and use that as proof.
|
||||
|
||||
*/
|
||||
Block.removeLoginBlock = function (Env, msg, cb) { // FIXME BLOCKS
|
||||
var publicKey = msg[0];
|
||||
var signature = msg[1];
|
||||
var block = Nacl.util.decodeUTF8('DELETE_BLOCK'); // clients and the server will have to agree on this constant
|
||||
|
||||
Block.validateLoginBlock(Env, publicKey, signature, block, function (e /*::, validatedBlock */) {
|
||||
if (e) { return void cb(e); }
|
||||
// derive the filepath
|
||||
var path = createLoginBlockPath(Env, publicKey);
|
||||
|
||||
// make sure the path is valid
|
||||
if (typeof(path) !== 'string') {
|
||||
return void cb('E_INVALID_BLOCK_PATH');
|
||||
}
|
||||
|
||||
// FIXME COLDSTORAGE
|
||||
Fs.unlink(path, function (err) {
|
||||
Env.Log.info('DELETION_BLOCK_BY_OWNER_RPC', {
|
||||
publicKey: publicKey,
|
||||
path: path,
|
||||
status: err? String(err): 'SUCCESS',
|
||||
});
|
||||
|
||||
if (err) { return void cb(err); }
|
||||
cb();
|
||||
});
|
||||
});
|
||||
};
|
||||
|
|
@ -0,0 +1,199 @@
|
|||
/*jshint esversion: 6 */
|
||||
const Channel = module.exports;
|
||||
|
||||
const Util = require("../common-util");
|
||||
const nThen = require("nthen");
|
||||
const Core = require("./core");
|
||||
const Metadata = require("./metadata");
|
||||
|
||||
Channel.clearOwnedChannel = function (Env, safeKey, channelId, cb) {
|
||||
if (typeof(channelId) !== 'string' || channelId.length !== 32) {
|
||||
return cb('INVALID_ARGUMENTS');
|
||||
}
|
||||
var unsafeKey = Util.unescapeKeyCharacters(safeKey);
|
||||
|
||||
Metadata.getMetadata(Env, channelId, function (err, metadata) {
|
||||
if (err) { return void cb(err); }
|
||||
if (!Core.hasOwners(metadata)) { return void cb('E_NO_OWNERS'); }
|
||||
// Confirm that the channel is owned by the user in question
|
||||
if (!Core.isOwner(metadata, unsafeKey)) {
|
||||
return void cb('INSUFFICIENT_PERMISSIONS');
|
||||
}
|
||||
return void Env.msgStore.clearChannel(channelId, function (e) {
|
||||
cb(e);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
Channel.removeOwnedChannel = function (Env, safeKey, channelId, cb) {
|
||||
if (typeof(channelId) !== 'string' || !Core.isValidId(channelId)) {
|
||||
return cb('INVALID_ARGUMENTS');
|
||||
}
|
||||
var unsafeKey = Util.unescapeKeyCharacters(safeKey);
|
||||
|
||||
if (Env.blobStore.isFileId(channelId)) {
|
||||
//var safeKey = Util.escapeKeyCharacters(unsafeKey);
|
||||
var blobId = channelId;
|
||||
|
||||
return void nThen(function (w) {
|
||||
// check if you have permissions
|
||||
Env.blobStore.isOwnedBy(safeKey, blobId, w(function (err, owned) {
|
||||
if (err || !owned) {
|
||||
w.abort();
|
||||
return void cb("INSUFFICIENT_PERMISSIONS");
|
||||
}
|
||||
}));
|
||||
}).nThen(function (w) {
|
||||
// remove the blob
|
||||
return void Env.blobStore.archive.blob(blobId, w(function (err) {
|
||||
Env.Log.info('ARCHIVAL_OWNED_FILE_BY_OWNER_RPC', {
|
||||
safeKey: safeKey,
|
||||
blobId: blobId,
|
||||
status: err? String(err): 'SUCCESS',
|
||||
});
|
||||
if (err) {
|
||||
w.abort();
|
||||
return void cb(err);
|
||||
}
|
||||
}));
|
||||
}).nThen(function () {
|
||||
// archive the proof
|
||||
return void Env.blobStore.archive.proof(safeKey, blobId, function (err) {
|
||||
Env.Log.info("ARCHIVAL_PROOF_REMOVAL_BY_OWNER_RPC", {
|
||||
safeKey: safeKey,
|
||||
blobId: blobId,
|
||||
status: err? String(err): 'SUCCESS',
|
||||
});
|
||||
if (err) {
|
||||
return void cb("E_PROOF_REMOVAL");
|
||||
}
|
||||
cb(void 0, 'OK');
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
Metadata.getMetadata(Env, channelId, function (err, metadata) {
|
||||
if (err) { return void cb(err); }
|
||||
if (!Core.hasOwners(metadata)) { return void cb('E_NO_OWNERS'); }
|
||||
if (!Core.isOwner(metadata, unsafeKey)) {
|
||||
return void cb('INSUFFICIENT_PERMISSIONS');
|
||||
}
|
||||
// temporarily archive the file
|
||||
return void Env.msgStore.archiveChannel(channelId, function (e) {
|
||||
Env.Log.info('ARCHIVAL_CHANNEL_BY_OWNER_RPC', {
|
||||
unsafeKey: unsafeKey,
|
||||
channelId: channelId,
|
||||
status: e? String(e): 'SUCCESS',
|
||||
});
|
||||
if (e) {
|
||||
return void cb(e);
|
||||
}
|
||||
cb(void 0, 'OK');
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
Channel.removeOwnedChannelHistory = function (Env, channelId, unsafeKey, hash, cb) { // XXX UNSAFE
|
||||
nThen(function (w) {
|
||||
Metadata.getMetadata(Env, channelId, w(function (err, metadata) {
|
||||
if (err) { return void cb(err); }
|
||||
if (!Core.hasOwners(metadata)) {
|
||||
w.abort();
|
||||
return void cb('E_NO_OWNERS');
|
||||
}
|
||||
if (!Core.isOwner(metadata, unsafeKey)) {
|
||||
w.abort();
|
||||
return void cb("INSUFFICIENT_PERMISSIONS");
|
||||
}
|
||||
// else fall through to the next block
|
||||
}));
|
||||
}).nThen(function () {
|
||||
Env.msgStore.trimChannel(channelId, hash, function (err) {
|
||||
if (err) { return void cb(err); }
|
||||
// clear historyKeeper's cache for this channel
|
||||
Env.historyKeeper.channelClose(channelId);
|
||||
cb(void 0, 'OK');
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
var ARRAY_LINE = /^\[/;
|
||||
|
||||
/* Files can contain metadata but not content
|
||||
call back with true if the channel log has no content other than metadata
|
||||
otherwise false
|
||||
*/
|
||||
Channel.isNewChannel = function (Env, channel, cb) {
|
||||
if (!Core.isValidId(channel)) { return void cb('INVALID_CHAN'); }
|
||||
if (channel.length !== 32) { return void cb('INVALID_CHAN'); }
|
||||
|
||||
var done = false;
|
||||
Env.msgStore.getMessages(channel, function (msg) {
|
||||
if (done) { return; }
|
||||
try {
|
||||
if (typeof(msg) === 'string' && ARRAY_LINE.test(msg)) {
|
||||
done = true;
|
||||
return void cb(void 0, false);
|
||||
}
|
||||
} catch (e) {
|
||||
Env.WARN('invalid message read from store', e);
|
||||
}
|
||||
}, function () {
|
||||
if (done) { return; }
|
||||
// no more messages...
|
||||
cb(void 0, true);
|
||||
});
|
||||
};
|
||||
|
||||
/* writePrivateMessage
|
||||
allows users to anonymously send a message to the channel
|
||||
prevents their netflux-id from being stored in history
|
||||
and from being broadcast to anyone that might currently be in the channel
|
||||
|
||||
Otherwise behaves the same as sending to a channel
|
||||
*/
|
||||
Channel.writePrivateMessage = function (Env, args, Server, cb) {
|
||||
var channelId = args[0];
|
||||
var msg = args[1];
|
||||
|
||||
// don't bother handling empty messages
|
||||
if (!msg) { return void cb("INVALID_MESSAGE"); }
|
||||
|
||||
// don't support anything except regular channels
|
||||
if (!Core.isValidId(channelId) || channelId.length !== 32) {
|
||||
return void cb("INVALID_CHAN");
|
||||
}
|
||||
|
||||
// We expect a modern netflux-websocket-server instance
|
||||
// if this API isn't here everything will fall apart anyway
|
||||
if (!(Server && typeof(Server.send) === 'function')) {
|
||||
return void cb("NOT_IMPLEMENTED");
|
||||
}
|
||||
|
||||
// historyKeeper expects something with an 'id' attribute
|
||||
// it will fail unless you provide it, but it doesn't need anything else
|
||||
var channelStruct = {
|
||||
id: channelId,
|
||||
};
|
||||
|
||||
// construct a message to store and broadcast
|
||||
var fullMessage = [
|
||||
0, // idk
|
||||
null, // normally the netflux id, null isn't rejected, and it distinguishes messages written in this way
|
||||
"MSG", // indicate that this is a MSG
|
||||
channelId, // channel id
|
||||
msg // the actual message content. Generally a string
|
||||
];
|
||||
|
||||
// historyKeeper already knows how to handle metadata and message validation, so we just pass it off here
|
||||
// if the message isn't valid it won't be stored.
|
||||
Env.historyKeeper.channelMessage(Server, channelStruct, fullMessage);
|
||||
|
||||
// call back with the message and the target channel.
|
||||
// historyKeeper will take care of broadcasting it if anyone is in the channel
|
||||
cb(void 0, {
|
||||
channel: channelId,
|
||||
message: fullMessage
|
||||
});
|
||||
};
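
For illustration, a hedged usage sketch (not part of the diff) of pushing an ephemeral, anonymous message into a pad's channel; the channel id and payload are made up.

Channel.writePrivateMessage(Env, [
    "0123456789abcdef0123456789abcdef", // 32-character channel id
    JSON.stringify({ type: "NOTIFY", content: "example payload" })
], Server, function (err, sent) {
    if (err) { return void console.error(err); }
    // sent.channel and sent.message mirror what historyKeeper was asked to store
});
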
@ -0,0 +1,188 @@
|
|||
/*jshint esversion: 6 */
|
||||
/* globals process */
|
||||
const Core = module.exports;
|
||||
const Util = require("../common-util");
|
||||
const escapeKeyCharacters = Util.escapeKeyCharacters;
|
||||
|
||||
/* Use Nacl for checking signatures of messages */
|
||||
const Nacl = require("tweetnacl/nacl-fast");
|
||||
|
||||
|
||||
Core.DEFAULT_LIMIT = 50 * 1024 * 1024;
|
||||
Core.SESSION_EXPIRATION_TIME = 60 * 1000;
|
||||
|
||||
Core.isValidId = function (chan) {
|
||||
return chan && chan.length && /^[a-zA-Z0-9=+-]*$/.test(chan) &&
|
||||
[32, 48].indexOf(chan.length) > -1;
|
||||
};
|
||||
|
||||
var makeToken = Core.makeToken = function () {
|
||||
return Number(Math.floor(Math.random() * Number.MAX_SAFE_INTEGER))
|
||||
.toString(16);
|
||||
};
|
||||
|
||||
Core.makeCookie = function (token) {
|
||||
var time = (+new Date());
|
||||
time -= time % 5000;
|
||||
|
||||
return [
|
||||
time,
|
||||
process.pid,
|
||||
token
|
||||
];
|
||||
};
|
||||
|
||||
var parseCookie = function (cookie) {
|
||||
if (!(cookie && cookie.split)) { return null; }
|
||||
|
||||
var parts = cookie.split('|');
|
||||
if (parts.length !== 3) { return null; }
|
||||
|
||||
var c = {};
|
||||
c.time = new Date(parts[0]);
|
||||
c.pid = Number(parts[1]);
|
||||
c.seq = parts[2];
|
||||
return c;
|
||||
};
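
A short illustrative sketch (not from the diff) of the round trip between the two helpers above, assuming the cookie array is serialized with '|' separators, which is the form parseCookie() splits on; the values shown are made up.

var serialized = Core.makeCookie(Core.makeToken()).join('|'); // e.g. "1561034800000|4321|1a2b3c"
var parsed = parseCookie(serialized); // { time: Date, pid: 4321, seq: "1a2b3c" }
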
Core.getSession = function (Sessions, key) {
|
||||
var safeKey = escapeKeyCharacters(key);
|
||||
if (Sessions[safeKey]) {
|
||||
Sessions[safeKey].atime = +new Date();
|
||||
return Sessions[safeKey];
|
||||
}
|
||||
var user = Sessions[safeKey] = {};
|
||||
user.atime = +new Date();
|
||||
user.tokens = [
|
||||
makeToken()
|
||||
];
|
||||
return user;
|
||||
};
|
||||
|
||||
Core.expireSession = function (Sessions, safeKey) {
|
||||
var session = Sessions[safeKey];
|
||||
if (!session) { return; }
|
||||
if (session.blobstage) {
|
||||
session.blobstage.close();
|
||||
}
|
||||
delete Sessions[safeKey];
|
||||
};
|
||||
|
||||
Core.expireSessionAsync = function (Env, safeKey, cb) {
|
||||
setTimeout(function () {
|
||||
Core.expireSession(Env.Sessions, safeKey);
|
||||
cb(void 0, 'OK');
|
||||
});
|
||||
};
|
||||
|
||||
var isTooOld = function (time, now) {
|
||||
return (now - time) > 300000;
|
||||
};
|
||||
|
||||
Core.expireSessions = function (Sessions) {
|
||||
var now = +new Date();
|
||||
Object.keys(Sessions).forEach(function (safeKey) {
|
||||
var session = Sessions[safeKey];
|
||||
if (session && isTooOld(session.atime, now)) {
|
||||
Core.expireSession(Sessions, safeKey);
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
var addTokenForKey = function (Sessions, publicKey, token) {
|
||||
if (!Sessions[publicKey]) { throw new Error('undefined user'); }
|
||||
|
||||
var user = Core.getSession(Sessions, publicKey);
|
||||
user.tokens.push(token);
|
||||
user.atime = +new Date();
|
||||
if (user.tokens.length > 2) { user.tokens.shift(); }
|
||||
};
|
||||
|
||||
Core.isValidCookie = function (Sessions, publicKey, cookie) {
|
||||
var parsed = parseCookie(cookie);
|
||||
if (!parsed) { return false; }
|
||||
|
||||
var now = +new Date();
|
||||
|
||||
if (!parsed.time) { return false; }
|
||||
if (isTooOld(parsed.time, now)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// different process. try harder
|
||||
if (process.pid !== parsed.pid) {
|
||||
return false;
|
||||
}
|
||||
|
||||
var user = Core.getSession(Sessions, publicKey);
|
||||
if (!user) { return false; }
|
||||
|
||||
var idx = user.tokens.indexOf(parsed.seq);
|
||||
if (idx === -1) { return false; }
|
||||
|
||||
if (idx > 0) {
|
||||
// make a new token
|
||||
addTokenForKey(Sessions, publicKey, Core.makeToken());
|
||||
}
|
||||
|
||||
return true;
|
||||
};
|
||||
|
||||
Core.checkSignature = function (Env, signedMsg, signature, publicKey) {
|
||||
if (!(signedMsg && publicKey)) { return false; }
|
||||
|
||||
var signedBuffer;
|
||||
var pubBuffer;
|
||||
var signatureBuffer;
|
||||
|
||||
try {
|
||||
signedBuffer = Nacl.util.decodeUTF8(signedMsg);
|
||||
} catch (e) {
|
||||
Env.Log.error('INVALID_SIGNED_BUFFER', signedMsg);
|
||||
return null;
|
||||
}
|
||||
|
||||
try {
|
||||
pubBuffer = Nacl.util.decodeBase64(publicKey);
|
||||
} catch (e) {
|
||||
return false;
|
||||
}
|
||||
|
||||
try {
|
||||
signatureBuffer = Nacl.util.decodeBase64(signature);
|
||||
} catch (e) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (pubBuffer.length !== 32) {
|
||||
Env.Log.error('PUBLIC_KEY_LENGTH', publicKey);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (signatureBuffer.length !== 64) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return Nacl.sign.detached.verify(signedBuffer, signatureBuffer, pubBuffer);
|
||||
};
|
||||
|
||||
// E_NO_OWNERS
|
||||
Core.hasOwners = function (metadata) {
|
||||
return Boolean(metadata && Array.isArray(metadata.owners));
|
||||
};
|
||||
|
||||
Core.hasPendingOwners = function (metadata) {
|
||||
return Boolean(metadata && Array.isArray(metadata.pending_owners));
|
||||
};
|
||||
|
||||
// INSUFFICIENT_PERMISSIONS
|
||||
Core.isOwner = function (metadata, unsafeKey) {
|
||||
return metadata.owners.indexOf(unsafeKey) !== -1;
|
||||
};
|
||||
|
||||
Core.isPendingOwner = function (metadata, unsafeKey) {
|
||||
return metadata.pending_owners.indexOf(unsafeKey) !== -1;
|
||||
};
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,116 @@
|
|||
/*jshint esversion: 6 */
|
||||
const Data = module.exports;
|
||||
|
||||
const Meta = require("../metadata");
|
||||
const BatchRead = require("../batch-read");
|
||||
const WriteQueue = require("../write-queue");
|
||||
const Core = require("./core");
|
||||
const Util = require("../common-util");
|
||||
|
||||
const batchMetadata = BatchRead("GET_METADATA");
|
||||
Data.getMetadata = function (Env, channel, cb) {
|
||||
if (!Core.isValidId(channel)) { return void cb('INVALID_CHAN'); }
|
||||
if (channel.length !== 32) { return cb("INVALID_CHAN_LENGTH"); }
|
||||
|
||||
batchMetadata(channel, cb, function (done) {
|
||||
var ref = {};
|
||||
var lineHandler = Meta.createLineHandler(ref, Env.Log.error);
|
||||
|
||||
return void Env.msgStore.readChannelMetadata(channel, lineHandler, function (err) {
|
||||
if (err) {
|
||||
// stream errors?
|
||||
return void done(err);
|
||||
}
|
||||
done(void 0, ref.meta);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
/* setMetadata
|
||||
- write a new line to the metadata log if a valid command is provided
|
||||
- data is an object: {
|
||||
channel: channelId,
|
||||
command: metadataCommand (string),
|
||||
value: value
|
||||
}
|
||||
*/
|
||||
var queueMetadata = WriteQueue();
|
||||
Data.setMetadata = function (Env, safeKey, data, cb) {
|
||||
var unsafeKey = Util.unescapeKeyCharacters(safeKey);
|
||||
|
||||
var channel = data.channel;
|
||||
var command = data.command;
|
||||
if (!channel || !Core.isValidId(channel)) { return void cb ('INVALID_CHAN'); }
|
||||
if (!command || typeof (command) !== 'string') { return void cb ('INVALID_COMMAND'); }
|
||||
if (Meta.commands.indexOf(command) === -1) { return void cb('UNSUPPORTED_COMMAND'); }
|
||||
|
||||
queueMetadata(channel, function (next) {
|
||||
Data.getMetadata(Env, channel, function (err, metadata) {
|
||||
if (err) {
|
||||
cb(err);
|
||||
return void next();
|
||||
}
|
||||
if (!Core.hasOwners(metadata)) {
|
||||
cb('E_NO_OWNERS');
|
||||
return void next();
|
||||
}
|
||||
|
||||
// if you are a pending owner and not an owner
|
||||
// you can either ADD_OWNERS, or RM_PENDING_OWNERS
|
||||
// and you should only be able to add yourself as an owner
|
||||
// everything else should be rejected
|
||||
// else if you are not an owner
|
||||
// you should be rejected
|
||||
// else write the command
|
||||
|
||||
// Confirm that the channel is owned by the user in question
|
||||
// or the user is accepting a pending ownership offer
|
||||
if (Core.hasPendingOwners(metadata) &&
|
||||
Core.isPendingOwner(metadata, unsafeKey) &&
|
||||
!Core.isOwner(metadata, unsafeKey)) {
|
||||
|
||||
// If you are a pending owner, make sure you can only add yourself as an owner
|
||||
if ((command !== 'ADD_OWNERS' && command !== 'RM_PENDING_OWNERS')
|
||||
|| !Array.isArray(data.value)
|
||||
|| data.value.length !== 1
|
||||
|| data.value[0] !== unsafeKey) {
|
||||
cb('INSUFFICIENT_PERMISSIONS');
|
||||
return void next();
|
||||
}
|
||||
// FIXME wacky fallthrough is hard to read
|
||||
// we could pass this off to a writeMetadataCommand function
|
||||
// and make the flow easier to follow
|
||||
} else if (!Core.isOwner(metadata, unsafeKey)) {
|
||||
cb('INSUFFICIENT_PERMISSIONS');
|
||||
return void next();
|
||||
}
|
||||
|
||||
// Add the new metadata line
|
||||
var line = [command, data.value, +new Date()];
|
||||
var changed = false;
|
||||
try {
|
||||
changed = Meta.handleCommand(metadata, line);
|
||||
} catch (e) {
|
||||
cb(e);
|
||||
return void next();
|
||||
}
|
||||
|
||||
// if your command is valid but it didn't result in any change to the metadata,
|
||||
// call back now and don't write any "useless" line to the log
|
||||
if (!changed) {
|
||||
cb(void 0, metadata);
|
||||
return void next();
|
||||
}
|
||||
Env.msgStore.writeMetadata(channel, JSON.stringify(line), function (e) {
|
||||
if (e) {
|
||||
cb(e);
|
||||
return void next();
|
||||
}
|
||||
cb(void 0, metadata);
|
||||
next();
|
||||
});
|
||||
});
|
||||
});
|
||||
};
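
For illustration, a hedged usage sketch (not part of the diff) of an owner adding another user as an owner; the keys and channel id are placeholders, and 'ADD_OWNERS' is one of the commands referenced in the permission checks above.

Data.setMetadata(Env, ownerSafeKey, {
    channel: "0123456789abcdef0123456789abcdef", // 32-character channel id
    command: "ADD_OWNERS",
    value: [otherUserUnsafeKey],
}, function (err, metadata) {
    if (err) { return void console.error(err); } // e.g. 'INSUFFICIENT_PERMISSIONS'
    // `metadata` reflects the command if it changed anything
});
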
@ -0,0 +1,464 @@
|
|||
/*jshint esversion: 6 */
|
||||
const Core = require("./core");
|
||||
|
||||
const BatchRead = require("../batch-read");
|
||||
const Pins = require("../pins");
|
||||
|
||||
const Pinning = module.exports;
|
||||
const Nacl = require("tweetnacl/nacl-fast");
|
||||
const Util = require("../common-util");
|
||||
const nThen = require("nthen");
|
||||
const Saferphore = require("saferphore");
|
||||
const Pinned = require('../../scripts/pinned');
|
||||
|
||||
//const escapeKeyCharacters = Util.escapeKeyCharacters;
|
||||
const unescapeKeyCharacters = Util.unescapeKeyCharacters;
|
||||
|
||||
var sumChannelSizes = function (sizes) {
|
||||
return Object.keys(sizes).map(function (id) { return sizes[id]; })
|
||||
.filter(function (x) {
|
||||
// only allow positive numbers
|
||||
return !(typeof(x) !== 'number' || x <= 0);
|
||||
})
|
||||
.reduce(function (a, b) { return a + b; }, 0);
|
||||
};
|
||||
|
||||
// XXX it's possible for this to respond before the server has had a chance
|
||||
// to fetch the limits. Maybe we should respond with an error...
|
||||
// or wait until we actually know the limits before responding
|
||||
var getLimit = Pinning.getLimit = function (Env, publicKey, cb) {
|
||||
var unescapedKey = unescapeKeyCharacters(publicKey);
|
||||
var limit = Env.limits[unescapedKey];
|
||||
var defaultLimit = typeof(Env.defaultStorageLimit) === 'number'?
|
||||
Env.defaultStorageLimit: Core.DEFAULT_LIMIT;
|
||||
|
||||
var toSend = limit && typeof(limit.limit) === "number"?
|
||||
[limit.limit, limit.plan, limit.note] : [defaultLimit, '', ''];
|
||||
|
||||
cb(void 0, toSend);
|
||||
};
|
||||
|
||||
var addPinned = function (
|
||||
Env,
|
||||
publicKey /*:string*/,
|
||||
channelList /*Array<string>*/,
|
||||
cb /*:()=>void*/)
|
||||
{
|
||||
Env.evPinnedPadsReady.reg(() => {
|
||||
channelList.forEach((c) => {
|
||||
const x = Env.pinnedPads[c] = Env.pinnedPads[c] || {};
|
||||
x[publicKey] = 1;
|
||||
});
|
||||
cb();
|
||||
});
|
||||
};
|
||||
var removePinned = function (
|
||||
Env,
|
||||
publicKey /*:string*/,
|
||||
channelList /*Array<string>*/,
|
||||
cb /*:()=>void*/)
|
||||
{
|
||||
Env.evPinnedPadsReady.reg(() => {
|
||||
channelList.forEach((c) => {
|
||||
const x = Env.pinnedPads[c];
|
||||
if (!x) { return; }
|
||||
delete x[publicKey];
|
||||
});
|
||||
cb();
|
||||
});
|
||||
};
|
||||
|
||||
var getMultipleFileSize = function (Env, channels, cb) {
|
||||
if (!Array.isArray(channels)) { return cb('INVALID_PIN_LIST'); }
|
||||
if (typeof(Env.msgStore.getChannelSize) !== 'function') {
|
||||
return cb('GET_CHANNEL_SIZE_UNSUPPORTED');
|
||||
}
|
||||
|
||||
var i = channels.length;
|
||||
var counts = {};
|
||||
|
||||
var done = function () {
|
||||
i--;
|
||||
if (i === 0) { return cb(void 0, counts); }
|
||||
};
|
||||
|
||||
channels.forEach(function (channel) {
|
||||
Pinning.getFileSize(Env, channel, function (e, size) {
|
||||
if (e) {
|
||||
// most likely error here is that a file no longer exists
|
||||
// but a user still has it in their drive, and wants to know
|
||||
// its size. We should find a way to inform them of this in
|
||||
// the future. For now we can just tell them it has no size.
|
||||
|
||||
//WARN('getFileSize', e);
|
||||
counts[channel] = 0;
|
||||
return done();
|
||||
}
|
||||
counts[channel] = size;
|
||||
done();
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
const batchUserPins = BatchRead("LOAD_USER_PINS");
|
||||
var loadUserPins = function (Env, publicKey, cb) {
|
||||
var session = Core.getSession(Env.Sessions, publicKey);
|
||||
|
||||
if (session.channels) {
|
||||
return cb(session.channels);
|
||||
}
|
||||
|
||||
batchUserPins(publicKey, cb, function (done) {
|
||||
var ref = {};
|
||||
var lineHandler = Pins.createLineHandler(ref, function (label, data) {
|
||||
Env.Log.error(label, {
|
||||
log: publicKey,
|
||||
data: data,
|
||||
});
|
||||
});
|
||||
|
||||
// if channels aren't in memory, load them from disk
|
||||
Env.pinStore.getMessages(publicKey, lineHandler, function () {
|
||||
// no more messages
|
||||
|
||||
// only put this into the cache if it completes
|
||||
session.channels = ref.pins;
|
||||
done(ref.pins); // FIXME no error handling?
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
var truthyKeys = function (O) {
|
||||
return Object.keys(O).filter(function (k) {
|
||||
return O[k];
|
||||
});
|
||||
};
|
||||
|
||||
var getChannelList = Pinning.getChannelList = function (Env, publicKey, _cb) {
|
||||
var cb = Util.once(Util.mkAsync(_cb));
|
||||
loadUserPins(Env, publicKey, function (pins) {
|
||||
cb(truthyKeys(pins));
|
||||
});
|
||||
};
|
||||
|
||||
const batchTotalSize = BatchRead("GET_TOTAL_SIZE");
|
||||
Pinning.getTotalSize = function (Env, publicKey, cb) {
|
||||
var unescapedKey = unescapeKeyCharacters(publicKey);
|
||||
var limit = Env.limits[unescapedKey];
|
||||
|
||||
// Get a common key if multiple users share the same quota, otherwise take the public key
|
||||
var batchKey = (limit && Array.isArray(limit.users)) ? limit.users.join('') : publicKey;
|
||||
|
||||
batchTotalSize(batchKey, cb, function (done) {
|
||||
var channels = [];
|
||||
var bytes = 0;
|
||||
nThen(function (waitFor) {
|
||||
// Get the channels list for our user account
|
||||
Pinning.getChannelList(Env, publicKey, waitFor(function (_channels) {
|
||||
if (!_channels) {
|
||||
waitFor.abort();
|
||||
return done('INVALID_PIN_LIST');
|
||||
}
|
||||
Array.prototype.push.apply(channels, _channels);
|
||||
}));
|
||||
// Get the channels list for users sharing our quota
|
||||
if (limit && Array.isArray(limit.users) && limit.users.length > 1) {
|
||||
limit.users.forEach(function (key) {
|
||||
if (key === unescapedKey) { return; } // Don't count ourselves twice
|
||||
getChannelList(Env, key, waitFor(function (_channels) {
|
||||
if (!_channels) { return; } // Broken user, don't count their quota
|
||||
Array.prototype.push.apply(channels, _channels);
|
||||
}));
|
||||
});
|
||||
}
|
||||
}).nThen(function (waitFor) {
|
||||
// Get size of the channels
|
||||
var list = []; // Contains the channels already counted in the quota to avoid duplicates
|
||||
channels.forEach(function (channel) { // TODO semaphore?
|
||||
if (list.indexOf(channel) !== -1) { return; }
|
||||
list.push(channel);
|
||||
Pinning.getFileSize(Env, channel, waitFor(function (e, size) {
|
||||
if (!e) { bytes += size; }
|
||||
}));
|
||||
});
|
||||
}).nThen(function () {
|
||||
done(void 0, bytes);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
/* Users should be able to clear their own pin log with an authenticated RPC
|
||||
*/
|
||||
Pinning.removePins = function (Env, safeKey, cb) {
|
||||
if (typeof(Env.pinStore.removeChannel) !== 'function') {
|
||||
return void cb("E_NOT_IMPLEMENTED");
|
||||
}
|
||||
Env.pinStore.removeChannel(safeKey, function (err) {
|
||||
Env.Log.info('DELETION_PIN_BY_OWNER_RPC', {
|
||||
safeKey: safeKey,
|
||||
status: err? String(err): 'SUCCESS',
|
||||
});
|
||||
|
||||
cb(err);
|
||||
});
|
||||
};
|
||||
|
||||
Pinning.trimPins = function (Env, safeKey, cb) {
|
||||
// XXX trim to latest pin checkpoint
|
||||
cb("NOT_IMPLEMENTED");
|
||||
};
|
||||
|
||||
var getFreeSpace = Pinning.getFreeSpace = function (Env, publicKey, cb) {
|
||||
getLimit(Env, publicKey, function (e, limit) {
|
||||
if (e) { return void cb(e); }
|
||||
Pinning.getTotalSize(Env, publicKey, function (e, size) {
|
||||
if (typeof(size) === 'undefined') { return void cb(e); }
|
||||
|
||||
var rem = limit[0] - size;
|
||||
if (typeof(rem) !== 'number') {
|
||||
return void cb('invalid_response');
|
||||
}
|
||||
cb(void 0, rem);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
var hashChannelList = function (A) {
|
||||
var uniques = [];
|
||||
|
||||
A.forEach(function (a) {
|
||||
if (uniques.indexOf(a) === -1) { uniques.push(a); }
|
||||
});
|
||||
uniques.sort();
|
||||
|
||||
var hash = Nacl.util.encodeBase64(Nacl.hash(Nacl
|
||||
.util.decodeUTF8(JSON.stringify(uniques))));
|
||||
|
||||
return hash;
|
||||
};
|
||||
|
||||
var getHash = Pinning.getHash = function (Env, publicKey, cb) {
|
||||
getChannelList(Env, publicKey, function (channels) {
|
||||
cb(void 0, hashChannelList(channels));
|
||||
});
|
||||
};
|
||||
|
||||
Pinning.pinChannel = function (Env, publicKey, channels, cb) {
|
||||
if (!channels || !channels.filter) {
|
||||
return void cb('INVALID_PIN_LIST');
|
||||
}
|
||||
|
||||
// get channel list ensures your session has a cached channel list
|
||||
getChannelList(Env, publicKey, function (pinned) {
|
||||
var session = Core.getSession(Env.Sessions, publicKey);
|
||||
|
||||
// only pin channels which are not already pinned
|
||||
var toStore = channels.filter(function (channel) {
|
||||
return pinned.indexOf(channel) === -1;
|
||||
});
|
||||
|
||||
if (toStore.length === 0) {
|
||||
return void getHash(Env, publicKey, cb);
|
||||
}
|
||||
|
||||
getMultipleFileSize(Env, toStore, function (e, sizes) {
|
||||
if (typeof(sizes) === 'undefined') { return void cb(e); }
|
||||
var pinSize = sumChannelSizes(sizes);
|
||||
|
||||
getFreeSpace(Env, publicKey, function (e, free) {
|
||||
if (typeof(free) === 'undefined') {
|
||||
Env.WARN('getFreeSpace', e);
|
||||
return void cb(e);
|
||||
}
|
||||
if (pinSize > free) { return void cb('E_OVER_LIMIT'); }
|
||||
|
||||
Env.pinStore.message(publicKey, JSON.stringify(['PIN', toStore, +new Date()]),
|
||||
function (e) {
|
||||
if (e) { return void cb(e); }
|
||||
toStore.forEach(function (channel) {
|
||||
session.channels[channel] = true;
|
||||
});
|
||||
addPinned(Env, publicKey, toStore, () => {});
|
||||
getHash(Env, publicKey, cb);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
};
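
For illustration, a hedged usage sketch (not part of the diff) of pinning two documents and receiving the updated pin-list hash; the key and channel ids are placeholders.

Pinning.pinChannel(Env, userPublicKey, [
    "0123456789abcdef0123456789abcdef",
    "fedcba9876543210fedcba9876543210"
], function (err, hash) {
    if (err) { return void console.error(err); } // e.g. 'E_OVER_LIMIT'
    console.log('new pin hash:', hash); // base64 hash of the deduplicated, sorted pin list
});
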
Pinning.unpinChannel = function (Env, publicKey, channels, cb) {
|
||||
if (!channels || !channels.filter) {
|
||||
// expected array
|
||||
return void cb('INVALID_PIN_LIST');
|
||||
}
|
||||
|
||||
getChannelList(Env, publicKey, function (pinned) {
|
||||
var session = Core.getSession(Env.Sessions, publicKey);
|
||||
|
||||
// only unpin channels which are pinned
|
||||
var toStore = channels.filter(function (channel) {
|
||||
return pinned.indexOf(channel) !== -1;
|
||||
});
|
||||
|
||||
if (toStore.length === 0) {
|
||||
return void getHash(Env, publicKey, cb);
|
||||
}
|
||||
|
||||
Env.pinStore.message(publicKey, JSON.stringify(['UNPIN', toStore, +new Date()]),
|
||||
function (e) {
|
||||
if (e) { return void cb(e); }
|
||||
toStore.forEach(function (channel) {
|
||||
delete session.channels[channel];
|
||||
});
|
||||
removePinned(Env, publicKey, toStore, () => {});
|
||||
getHash(Env, publicKey, cb);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
Pinning.resetUserPins = function (Env, publicKey, channelList, cb) {
|
||||
if (!Array.isArray(channelList)) { return void cb('INVALID_PIN_LIST'); }
|
||||
var session = Core.getSession(Env.Sessions, publicKey);
|
||||
|
||||
if (!channelList.length) {
|
||||
return void getHash(Env, publicKey, function (e, hash) {
|
||||
if (e) { return cb(e); }
|
||||
cb(void 0, hash);
|
||||
});
|
||||
}
|
||||
|
||||
var pins = {};
|
||||
getMultipleFileSize(Env, channelList, function (e, sizes) {
|
||||
if (typeof(sizes) === 'undefined') { return void cb(e); }
|
||||
var pinSize = sumChannelSizes(sizes);
|
||||
|
||||
|
||||
getLimit(Env, publicKey, function (e, limit) {
|
||||
if (e) {
|
||||
Env.WARN('[RESET_ERR]', e);
|
||||
return void cb(e);
|
||||
}
|
||||
|
||||
/* we want to let people pin, even if they are over their limit,
|
||||
but they should only be able to do this once.
|
||||
|
||||
This prevents data loss in the case that someone registers, but
|
||||
does not have enough free space to pin their migrated data.
|
||||
|
||||
They will not be able to pin additional pads until they upgrade
|
||||
or delete enough files to go back under their limit. */
|
||||
if (pinSize > limit[0] && session.hasPinned) { return void(cb('E_OVER_LIMIT')); }
|
||||
Env.pinStore.message(publicKey, JSON.stringify(['RESET', channelList, +new Date()]),
|
||||
function (e) {
|
||||
if (e) { return void cb(e); }
|
||||
channelList.forEach(function (channel) {
|
||||
pins[channel] = true;
|
||||
});
|
||||
|
||||
var oldChannels;
|
||||
if (session.channels && typeof(session.channels) === 'object') {
|
||||
oldChannels = Object.keys(session.channels);
|
||||
} else {
|
||||
oldChannels = [];
|
||||
}
|
||||
removePinned(Env, publicKey, oldChannels, () => {
|
||||
addPinned(Env, publicKey, channelList, ()=>{});
|
||||
});
|
||||
|
||||
// update in-memory cache IFF the reset was allowed.
|
||||
session.channels = pins;
|
||||
getHash(Env, publicKey, function (e, hash) {
|
||||
cb(e, hash);
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
Pinning.getFileSize = function (Env, channel, _cb) {
|
||||
var cb = Util.once(Util.mkAsync(_cb));
|
||||
if (!Core.isValidId(channel)) { return void cb('INVALID_CHAN'); }
|
||||
if (channel.length === 32) {
|
||||
if (typeof(Env.msgStore.getChannelSize) !== 'function') {
|
||||
return cb('GET_CHANNEL_SIZE_UNSUPPORTED');
|
||||
}
|
||||
|
||||
return void Env.msgStore.getChannelSize(channel, function (e, size /*:number*/) {
|
||||
if (e) {
|
||||
if (e.code === 'ENOENT') { return void cb(void 0, 0); }
|
||||
return void cb(e.code);
|
||||
}
|
||||
cb(void 0, size);
|
||||
});
|
||||
}
|
||||
|
||||
// 'channel' refers to a file, so you need another API
|
||||
Env.blobStore.size(channel, function (e, size) {
|
||||
if (typeof(size) === 'undefined') { return void cb(e); }
|
||||
cb(void 0, size);
|
||||
});
|
||||
};
|
||||
|
||||
/* accepts a list, and returns a sublist of channel or file ids which seem
|
||||
to have been deleted from the server (file size 0)
|
||||
|
||||
we might consider that we should only say a file is gone if fs.stat returns
|
||||
ENOENT, but for now it's simplest to just rely on getFileSize...
|
||||
*/
|
||||
Pinning.getDeletedPads = function (Env, channels, cb) {
|
||||
if (!Array.isArray(channels)) { return cb('INVALID_LIST'); }
|
||||
var L = channels.length;
|
||||
|
||||
var sem = Saferphore.create(10);
|
||||
var absentees = [];
|
||||
|
||||
var job = function (channel, wait) {
|
||||
return function (give) {
|
||||
Pinning.getFileSize(Env, channel, wait(give(function (e, size) {
|
||||
if (e) { return; }
|
||||
if (size === 0) { absentees.push(channel); }
|
||||
})));
|
||||
};
|
||||
};
|
||||
|
||||
nThen(function (w) {
|
||||
for (var i = 0; i < L; i++) {
|
||||
sem.take(job(channels[i], w));
|
||||
}
|
||||
}).nThen(function () {
|
||||
cb(void 0, absentees);
|
||||
});
|
||||
};
|
||||
|
||||
// load the list of pinned channels from disk, then fire evPinnedPadsReady so waiting callers can proceed
|
||||
Pinning.loadChannelPins = function (Env) {
|
||||
Pinned.load(function (err, data) {
|
||||
if (err) {
|
||||
Env.Log.error("LOAD_CHANNEL_PINS", err);
|
||||
|
||||
// FIXME not sure what should be done here instead
|
||||
Env.pinnedPads = {};
|
||||
Env.evPinnedPadsReady.fire();
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
Env.pinnedPads = data;
|
||||
Env.evPinnedPadsReady.fire();
|
||||
}, {
|
||||
pinPath: Env.paths.pin,
|
||||
});
|
||||
};
|
||||
|
||||
Pinning.isChannelPinned = function (Env, channel, cb) {
|
||||
Env.evPinnedPadsReady.reg(() => {
|
||||
if (Env.pinnedPads[channel] && Object.keys(Env.pinnedPads[channel]).length) {
|
||||
cb(true);
|
||||
} else {
|
||||
delete Env.pinnedPads[channel];
|
||||
cb(false);
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
|
|
@ -0,0 +1,112 @@
|
|||
/*jshint esversion: 6 */
|
||||
/* globals Buffer*/
|
||||
const Quota = module.exports;
|
||||
|
||||
const Core = require("./core");
|
||||
const Util = require("../common-util");
|
||||
const Package = require('../../package.json');
|
||||
const Https = require("https");
|
||||
|
||||
Quota.applyCustomLimits = function (Env) {
|
||||
var isLimit = function (o) {
|
||||
var valid = o && typeof(o) === 'object' &&
|
||||
typeof(o.limit) === 'number' &&
|
||||
typeof(o.plan) === 'string' &&
|
||||
typeof(o.note) === 'string';
|
||||
return valid;
|
||||
};
|
||||
|
||||
// read custom limits from the Environment (taken from config)
|
||||
var customLimits = (function (custom) {
|
||||
var limits = {};
|
||||
Object.keys(custom).forEach(function (k) {
|
||||
k.replace(/\/([^\/]+)$/, function (all, safeKey) {
|
||||
var id = Util.unescapeKeyCharacters(safeKey || '');
|
||||
limits[id] = custom[k];
|
||||
return '';
|
||||
});
|
||||
});
|
||||
return limits;
|
||||
}(Env.customLimits || {}));
|
||||
|
||||
Object.keys(customLimits).forEach(function (k) {
|
||||
if (!isLimit(customLimits[k])) { return; }
|
||||
Env.limits[k] = customLimits[k];
|
||||
});
|
||||
};
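
For illustration, a hedged example (not part of the diff) of the per-user entries that applyCustomLimits() accepts and that getLimit() reads; the key and values are made up.

Env.limits["[unescaped user public key]"] = {
    limit: 5 * 1024 * 1024 * 1024, // bytes
    plan: "example-plan",
    note: "manually granted quota",
};
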
// The limits object contains storage limits for all the publicKey that have paid
|
||||
// To each key is associated an object containing the 'limit' value and a 'note' explaining that limit
|
||||
// XXX maybe the use case with a publicKey should be a different command that calls this?
|
||||
Quota.updateLimits = function (Env, publicKey, cb) { // FIXME BATCH?S
|
||||
|
||||
if (Env.adminEmail === false) {
|
||||
Quota.applyCustomLimits(Env);
|
||||
if (Env.allowSubscriptions === false) { return; }
|
||||
throw new Error("allowSubscriptions must be false if adminEmail is false");
|
||||
}
|
||||
if (typeof cb !== "function") { cb = function () {}; }
|
||||
|
||||
var defaultLimit = typeof(Env.defaultStorageLimit) === 'number'?
|
||||
Env.defaultStorageLimit: Core.DEFAULT_LIMIT;
|
||||
|
||||
var userId;
|
||||
if (publicKey) {
|
||||
userId = Util.unescapeKeyCharacters(publicKey);
|
||||
}
|
||||
|
||||
var body = JSON.stringify({
|
||||
domain: Env.myDomain,
|
||||
subdomain: Env.mySubdomain || null,
|
||||
adminEmail: Env.adminEmail,
|
||||
version: Package.version
|
||||
});
|
||||
var options = {
|
||||
host: 'accounts.cryptpad.fr',
|
||||
path: '/api/getauthorized',
|
||||
method: 'POST',
|
||||
headers: {
|
||||
"Content-Type": "application/json",
|
||||
"Content-Length": Buffer.byteLength(body)
|
||||
}
|
||||
};
|
||||
|
||||
var req = Https.request(options, function (response) {
|
||||
if (!('' + response.statusCode).match(/^2\d\d$/)) {
|
||||
return void cb('SERVER ERROR ' + response.statusCode);
|
||||
}
|
||||
var str = '';
|
||||
|
||||
response.on('data', function (chunk) {
|
||||
str += chunk;
|
||||
});
|
||||
|
||||
response.on('end', function () {
|
||||
try {
|
||||
var json = JSON.parse(str);
|
||||
Env.limits = json;
|
||||
Quota.applyCustomLimits(Env);
|
||||
|
||||
var l;
|
||||
if (userId) {
|
||||
var limit = Env.limits[userId];
|
||||
l = limit && typeof limit.limit === "number" ?
|
||||
[limit.limit, limit.plan, limit.note] : [defaultLimit, '', ''];
|
||||
}
|
||||
cb(void 0, l);
|
||||
} catch (e) {
|
||||
cb(e);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
req.on('error', function (e) {
|
||||
Quota.applyCustomLimits(Env);
|
||||
if (!Env.domain) { return cb(); } // XXX
|
||||
cb(e);
|
||||
});
|
||||
|
||||
req.end(body);
|
||||
};
|
||||
|
||||
|
|
@ -0,0 +1,57 @@
|
|||
/*jshint esversion: 6 */
|
||||
const Upload = module.exports;
|
||||
const Util = require("../common-util");
|
||||
const Pinning = require("./pin-rpc");
|
||||
const nThen = require("nthen");
|
||||
const Core = require("./core");
|
||||
|
||||
Upload.status = function (Env, safeKey, filesize, _cb) { // FIXME FILES
|
||||
var cb = Util.once(Util.mkAsync(_cb));
|
||||
|
||||
// validate that the provided size is actually a positive number
|
||||
if (typeof(filesize) !== 'number' ||
filesize < 0) { return void cb('E_INVALID_SIZE'); }
|
||||
|
||||
if (filesize >= Env.maxUploadSize) { return cb('TOO_LARGE'); }
|
||||
|
||||
nThen(function (w) {
|
||||
var abortAndCB = Util.both(w.abort, cb);
|
||||
Env.blobStore.status(safeKey, w(function (err, inProgress) {
|
||||
// if there's an error something is weird
|
||||
if (err) { return void abortAndCB(err); }
|
||||
|
||||
// we cannot upload two things at once
|
||||
if (inProgress) { return void abortAndCB(void 0, true); }
|
||||
}));
|
||||
}).nThen(function () {
|
||||
// if you're here then there are no pending uploads
|
||||
// check if you have space in your quota to upload something of this size
|
||||
Pinning.getFreeSpace(Env, safeKey, function (e, free) {
|
||||
if (e) { return void cb(e); }
|
||||
if (filesize >= free) { return cb('NOT_ENOUGH_SPACE'); }
|
||||
|
||||
var user = Core.getSession(Env.Sessions, safeKey);
|
||||
user.pendingUploadSize = filesize;
|
||||
user.currentUploadSize = 0;
|
||||
|
||||
cb(void 0, false);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
Upload.upload = function (Env, safeKey, chunk, cb) {
|
||||
Env.blobStore.upload(safeKey, chunk, cb);
|
||||
};
|
||||
|
||||
Upload.complete = function (Env, safeKey, arg, cb) {
|
||||
Env.blobStore.complete(safeKey, arg, cb);
|
||||
};
|
||||
|
||||
Upload.cancel = function (Env, safeKey, arg, cb) {
|
||||
Env.blobStore.cancel(safeKey, arg, cb);
|
||||
};
|
||||
|
||||
Upload.complete_owned = function (Env, safeKey, arg, cb) {
|
||||
Env.blobStore.completeOwned(safeKey, arg, cb);
|
||||
};
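
These commands are thin wrappers around Env.blobStore. A hedged sketch of how a caller might chain them follows; the chunking, and the names 'blob', 'chunk' and 'id' (the argument Env.blobStore.complete expects), are assumptions rather than anything spelled out in this file.

Upload.status(Env, safeKey, blob.length, function (err, pending) {
    if (err || pending) { return; } // no quota left, or an upload is already in progress
    Upload.upload(Env, safeKey, chunk, function (err) {
        if (err) { return void console.error(err); }
        Upload.complete(Env, safeKey, id, function (err) {
            if (err) { return void console.error(err); }
            console.log('upload finished');
        });
    });
});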
|
||||
|
|
@ -1,38 +1,25 @@
|
|||
/* jshint esversion: 6 */
|
||||
/* global Buffer */
|
||||
;(function () { 'use strict';
|
||||
|
||||
const nThen = require('nthen');
|
||||
const Nacl = require('tweetnacl/nacl-fast');
|
||||
const Crypto = require('crypto');
|
||||
const Once = require("./lib/once");
|
||||
const Meta = require("./lib/metadata");
|
||||
const WriteQueue = require("./lib/write-queue");
|
||||
const BatchRead = require("./lib/batch-read");
|
||||
const Once = require("./once");
|
||||
const Meta = require("./metadata");
|
||||
const WriteQueue = require("./write-queue");
|
||||
const BatchRead = require("./batch-read");
|
||||
|
||||
const RPC = require("./rpc");
|
||||
|
||||
const Extras = require("./hk-util.js");
|
||||
const STANDARD_CHANNEL_LENGTH = Extras.STANDARD_CHANNEL_LENGTH;
|
||||
const EPHEMERAL_CHANNEL_LENGTH = Extras.EPHEMERAL_CHANNEL_LENGTH;
|
||||
|
||||
let Log;
|
||||
const now = function () { return (new Date()).getTime(); };
|
||||
const ONE_DAY = 1000 * 60 * 60 * 24; // one day in milliseconds
|
||||
|
||||
/* getHash
|
||||
* this function slices off the leading portion of a message which is
|
||||
most likely unique
|
||||
* these "hashes" are used to identify particular messages in a channel's history
|
||||
* clients store "hashes" either in memory or in their drive to query for new messages:
|
||||
* when reconnecting to a pad
|
||||
* when connecting to chat or a mailbox
|
||||
* thus, we can't change this function without invalidating client data which:
|
||||
* is encrypted clientside
|
||||
* can't be easily migrated
|
||||
* don't break it!
|
||||
*/
|
||||
const getHash = function (msg) {
|
||||
if (typeof(msg) !== 'string') {
|
||||
Log.warn('HK_GET_HASH', 'getHash() called on ' + typeof(msg) + ': ' + msg);
|
||||
return '';
|
||||
}
|
||||
return msg.slice(0,64);
|
||||
};
|
||||
const getHash = Extras.getHash;
|
||||
|
||||
const tryParse = function (str) {
|
||||
try {
|
||||
|
@ -79,28 +66,22 @@ const isValidValidateKeyString = function (key) {
|
|||
}
|
||||
};
|
||||
|
||||
module.exports.create = function (cfg) {
|
||||
const rpc = cfg.rpc;
|
||||
var CHECKPOINT_PATTERN = /^cp\|(([A-Za-z0-9+\/=]+)\|)?/;
|
||||
|
||||
module.exports.create = function (cfg, cb) {
|
||||
var rpc;
|
||||
const tasks = cfg.tasks;
|
||||
const store = cfg.store;
|
||||
const retainData = cfg.retainData;
|
||||
Log = cfg.log;
|
||||
|
||||
Log.silly('HK_LOADING', 'LOADING HISTORY_KEEPER MODULE');
|
||||
|
||||
const metadata_cache = {};
|
||||
const channel_cache = {};
|
||||
const HISTORY_KEEPER_ID = Crypto.randomBytes(8).toString('hex');
|
||||
|
||||
Log.verbose('HK_ID', 'History keeper ID: ' + HISTORY_KEEPER_ID);
|
||||
|
||||
let sendMsg = function () {};
|
||||
let STANDARD_CHANNEL_LENGTH, EPHEMERAL_CHANNEL_LENGTH;
|
||||
const setConfig = function (config) {
|
||||
STANDARD_CHANNEL_LENGTH = config.STANDARD_CHANNEL_LENGTH;
|
||||
EPHEMERAL_CHANNEL_LENGTH = config.EPHEMERAL_CHANNEL_LENGTH;
|
||||
sendMsg = config.sendMsg;
|
||||
};
|
||||
|
||||
/* computeIndex
|
||||
can call back with an error or a computed index which includes:
|
||||
* cpIndex:
|
||||
|
@ -186,7 +167,7 @@ module.exports.create = function (cfg) {
|
|||
if (msg[0] === 0 && msg[2] === 'MSG' && typeof(msg[4]) === 'string') {
|
||||
// msgObj.offset is API guaranteed by our storage module
|
||||
// it should always be a valid positive integer
|
||||
offsetByHash[getHash(msg[4])] = msgObj.offset;
|
||||
offsetByHash[getHash(msg[4], Log)] = msgObj.offset;
|
||||
}
|
||||
// There is a trailing \n at the end of the file
|
||||
size = msgObj.offset + msgObj.buff.length + 1;
|
||||
|
@ -233,8 +214,9 @@ module.exports.create = function (cfg) {
|
|||
if the channel exists but its index does not then it caches the index
|
||||
*/
|
||||
const batchIndexReads = BatchRead("HK_GET_INDEX");
|
||||
const getIndex = (ctx, channelName, cb) => {
|
||||
const chan = ctx.channels[channelName];
|
||||
const getIndex = (channelName, cb) => {
|
||||
const chan = channel_cache[channelName];
|
||||
|
||||
// if there is a channel in memory and it has an index cached, return it
|
||||
if (chan && chan.index) {
|
||||
// enforce async behaviour
|
||||
|
@ -255,15 +237,7 @@ module.exports.create = function (cfg) {
|
|||
});
|
||||
};
|
||||
|
||||
/*::
|
||||
type cp_index_item = {
|
||||
offset: number,
|
||||
line: number
|
||||
}
|
||||
*/
|
||||
|
||||
/* storeMessage
|
||||
* ctx
|
||||
* channel id
|
||||
* the message to store
|
||||
* whether the message is a checkpoint
|
||||
|
@ -282,7 +256,7 @@ module.exports.create = function (cfg) {
|
|||
*/
|
||||
const queueStorage = WriteQueue();
|
||||
|
||||
const storeMessage = function (ctx, channel, msg, isCp, optionalMessageHash) {
|
||||
const storeMessage = function (channel, msg, isCp, optionalMessageHash) {
|
||||
const id = channel.id;
|
||||
|
||||
queueStorage(id, function (next) {
|
||||
|
@ -306,7 +280,7 @@ module.exports.create = function (cfg) {
|
|||
}
|
||||
}));
|
||||
}).nThen((waitFor) => {
|
||||
getIndex(ctx, id, waitFor((err, index) => {
|
||||
getIndex(id, waitFor((err, index) => {
|
||||
if (err) {
|
||||
Log.warn("HK_STORE_MESSAGE_INDEX", err.stack);
|
||||
// non-critical, we'll be able to get the channel index later
|
||||
|
@ -320,10 +294,10 @@ module.exports.create = function (cfg) {
|
|||
delete index.offsetByHash[k];
|
||||
}
|
||||
}
|
||||
index.cpIndex.push(({
|
||||
index.cpIndex.push({
|
||||
offset: index.size,
|
||||
line: ((index.line || 0) + 1)
|
||||
} /*:cp_index_item*/));
|
||||
});
|
||||
}
|
||||
if (optionalMessageHash) { index.offsetByHash[optionalMessageHash] = index.size; }
|
||||
index.size += msgBin.length;
|
||||
|
@ -335,38 +309,26 @@ module.exports.create = function (cfg) {
|
|||
});
|
||||
};
|
||||
|
||||
/* historyKeeperBroadcast
|
||||
* uses API from the netflux server to send messages to every member of a channel
|
||||
* sendMsg runs in a try-catch and drops users if sending a message fails
|
||||
*/
|
||||
const historyKeeperBroadcast = function (ctx, channel, msg) {
|
||||
let chan = ctx.channels[channel] || (([] /*:any*/) /*:Chan_t*/);
|
||||
chan.forEach(function (user) {
|
||||
sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify(msg)]);
|
||||
});
|
||||
};
|
||||
|
||||
/* expireChannel is here to clean up channels that should have been removed
|
||||
but for some reason are still present
|
||||
*/
|
||||
const expireChannel = function (ctx, channel) {
|
||||
if (retainData) {
|
||||
return void store.archiveChannel(channel, function (err) {
|
||||
Log.info("ARCHIVAL_CHANNEL_BY_HISTORY_KEEPER_EXPIRATION", {
|
||||
channelId: channel,
|
||||
status: err? String(err): "SUCCESS",
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
store.removeChannel(channel, function (err) {
|
||||
Log.info("DELETION_CHANNEL_BY_HISTORY_KEEPER_EXPIRATION", {
|
||||
channelid: channel,
|
||||
const expireChannel = function (channel) {
|
||||
return void store.archiveChannel(channel, function (err) {
|
||||
Log.info("ARCHIVAL_CHANNEL_BY_HISTORY_KEEPER_EXPIRATION", {
|
||||
channelId: channel,
|
||||
status: err? String(err): "SUCCESS",
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
/* dropChannel
|
||||
* cleans up memory structures which are managed entirely by the historyKeeper
|
||||
*/
|
||||
const dropChannel = function (chanName) {
|
||||
delete metadata_cache[chanName];
|
||||
delete channel_cache[chanName];
|
||||
};
|
||||
|
||||
/* checkExpired
|
||||
* synchronously returns true or undefined to indicate whether the channel is expired
|
||||
* according to its metadata
|
||||
|
@ -378,7 +340,7 @@ module.exports.create = function (cfg) {
|
|||
|
||||
FIXME the boolean nature of this API should be separated from its side effects
|
||||
*/
|
||||
const checkExpired = function (ctx, channel) {
|
||||
const checkExpired = function (Server, channel) {
|
||||
if (!(channel && channel.length === STANDARD_CHANNEL_LENGTH)) { return false; }
|
||||
let metadata = metadata_cache[channel];
|
||||
if (!(metadata && typeof(metadata.expire) === 'number')) { return false; }
|
||||
|
@ -393,26 +355,21 @@ module.exports.create = function (cfg) {
|
|||
// there may have been a problem with scheduling tasks
|
||||
// or the scheduled tasks may not be running
|
||||
// so trigger a removal from here
|
||||
if (pastDue >= ONE_DAY) { expireChannel(ctx, channel); }
|
||||
if (pastDue >= ONE_DAY) { expireChannel(channel); }
|
||||
|
||||
// close the channel
|
||||
store.closeChannel(channel, function () {
|
||||
historyKeeperBroadcast(ctx, channel, {
|
||||
Server.channelBroadcast(channel, {
|
||||
error: 'EEXPIRED',
|
||||
channel: channel
|
||||
});
|
||||
// remove it from any caches after you've told anyone in the channel
|
||||
// that it has expired
|
||||
delete ctx.channels[channel];
|
||||
delete metadata_cache[channel];
|
||||
}, HISTORY_KEEPER_ID);
|
||||
dropChannel(channel);
|
||||
});
|
||||
|
||||
// return true to indicate that it has expired
|
||||
return true;
|
||||
};
|
||||
|
||||
var CHECKPOINT_PATTERN = /^cp\|(([A-Za-z0-9+\/=]+)\|)?/;
|
||||
|
||||
/* onChannelMessage
|
||||
Determine what we should store when a message is broadcast to a channel
|
||||
|
||||
|
@ -424,7 +381,7 @@ module.exports.create = function (cfg) {
|
|||
* adds timestamps to incoming messages
|
||||
* writes messages to the store
|
||||
*/
|
||||
const onChannelMessage = function (ctx, channel, msgStruct) {
|
||||
const onChannelMessage = function (Server, channel, msgStruct) {
|
||||
// TODO our usage of 'channel' here looks prone to errors
|
||||
// we only use it for its 'id', but it can contain other stuff
|
||||
// also, we're using this RPC from both the RPC and Netflux-server
|
||||
|
@ -447,7 +404,7 @@ module.exports.create = function (cfg) {
|
|||
let metadata;
|
||||
nThen(function (w) {
|
||||
// getIndex (and therefore the latest metadata)
|
||||
getIndex(ctx, channel.id, w(function (err, index) {
|
||||
getIndex(channel.id, w(function (err, index) {
|
||||
if (err) {
|
||||
w.abort();
|
||||
return void Log.error('CHANNEL_MESSAGE_ERROR', err);
|
||||
|
@ -462,7 +419,7 @@ module.exports.create = function (cfg) {
|
|||
metadata = index.metadata;
|
||||
|
||||
// don't write messages to expired channels
|
||||
if (checkExpired(ctx, channel)) { return void w.abort(); }
|
||||
if (checkExpired(Server, channel)) { return void w.abort(); }
|
||||
|
||||
// if there's no validateKey present skip to the next block
|
||||
if (!metadata.validateKey) { return; }
|
||||
|
@ -512,20 +469,10 @@ module.exports.create = function (cfg) {
|
|||
msgStruct.push(now());
|
||||
|
||||
// storeMessage
|
||||
storeMessage(ctx, channel, JSON.stringify(msgStruct), isCp, getHash(msgStruct[4]));
|
||||
storeMessage(channel, JSON.stringify(msgStruct), isCp, getHash(msgStruct[4], Log));
|
||||
});
|
||||
};
|
||||
|
||||
/* dropChannel
|
||||
* exported as API
|
||||
* used by chainpad-server/NetfluxWebsocketSrv.js
|
||||
* cleans up memory structures which are managed entirely by the historyKeeper
|
||||
* the netflux server manages other memory in ctx.channels
|
||||
*/
|
||||
const dropChannel = function (chanName) {
|
||||
delete metadata_cache[chanName];
|
||||
};
|
||||
|
||||
/* getHistoryOffset
|
||||
returns a number representing the byte offset from the start of the log
|
||||
for whatever history you're seeking.
|
||||
|
@ -555,12 +502,12 @@ module.exports.create = function (cfg) {
|
|||
* -1 if you didn't find it
|
||||
|
||||
*/
|
||||
const getHistoryOffset = (ctx, channelName, lastKnownHash, cb /*:(e:?Error, os:?number)=>void*/) => {
|
||||
const getHistoryOffset = (channelName, lastKnownHash, cb) => {
|
||||
// lastKnownhash === -1 means we want the complete history
|
||||
if (lastKnownHash === -1) { return void cb(null, 0); }
|
||||
let offset = -1;
|
||||
nThen((waitFor) => {
|
||||
getIndex(ctx, channelName, waitFor((err, index) => {
|
||||
getIndex(channelName, waitFor((err, index) => {
|
||||
if (err) { waitFor.abort(); return void cb(err); }
|
||||
|
||||
// check if the "hash" the client is requesting exists in the index
|
||||
|
@ -611,7 +558,7 @@ module.exports.create = function (cfg) {
|
|||
const msg = tryParse(msgObj.buff.toString('utf8'));
|
||||
// if it was undefined then go onto the next message
|
||||
if (typeof msg === "undefined") { return readMore(); }
|
||||
if (typeof(msg[4]) !== 'string' || lastKnownHash !== getHash(msg[4])) {
|
||||
if (typeof(msg[4]) !== 'string' || lastKnownHash !== getHash(msg[4], Log)) {
|
||||
return void readMore();
|
||||
}
|
||||
offset = msgObj.offset;
|
||||
|
@ -633,10 +580,10 @@ module.exports.create = function (cfg) {
|
|||
* GET_HISTORY
|
||||
|
||||
*/
|
||||
const getHistoryAsync = (ctx, channelName, lastKnownHash, beforeHash, handler, cb) => {
|
||||
const getHistoryAsync = (channelName, lastKnownHash, beforeHash, handler, cb) => {
|
||||
let offset = -1;
|
||||
nThen((waitFor) => {
|
||||
getHistoryOffset(ctx, channelName, lastKnownHash, waitFor((err, os) => {
|
||||
getHistoryOffset(channelName, lastKnownHash, waitFor((err, os) => {
|
||||
if (err) {
|
||||
waitFor.abort();
|
||||
return void cb(err);
|
||||
|
@ -682,7 +629,7 @@ module.exports.create = function (cfg) {
|
|||
var content = parsed[4];
|
||||
if (typeof(content) !== 'string') { return; }
|
||||
|
||||
var hash = getHash(content);
|
||||
var hash = getHash(content, Log);
|
||||
if (hash === oldestKnownHash) {
|
||||
found = true;
|
||||
}
|
||||
|
@ -699,42 +646,50 @@ module.exports.create = function (cfg) {
|
|||
/* onChannelCleared
|
||||
* broadcasts to all clients in a channel when that channel's history is cleared
|
||||
*/
|
||||
const onChannelCleared = function (ctx, channel) {
|
||||
historyKeeperBroadcast(ctx, channel, {
|
||||
const onChannelCleared = function (Server, channel) {
|
||||
Server.channelBroadcast(channel, {
|
||||
error: 'ECLEARED',
|
||||
channel: channel
|
||||
});
|
||||
}, HISTORY_KEEPER_ID);
|
||||
};
|
||||
|
||||
// When a channel is removed from datastore, broadcast a message to all its connected users
|
||||
const onChannelDeleted = function (ctx, channel) {
|
||||
const onChannelDeleted = function (Server, channel) {
|
||||
store.closeChannel(channel, function () {
|
||||
historyKeeperBroadcast(ctx, channel, {
|
||||
Server.channelBroadcast(channel, {
|
||||
error: 'EDELETED',
|
||||
channel: channel
|
||||
});
|
||||
}, HISTORY_KEEPER_ID);
|
||||
});
|
||||
delete ctx.channels[channel];
|
||||
|
||||
delete channel_cache[channel];
|
||||
Server.clearChannel(channel);
|
||||
delete metadata_cache[channel];
|
||||
};
|
||||
// Check if the selected channel is expired
|
||||
// If it is, remove it from memory and broadcast a message to its members
|
||||
|
||||
const onChannelMetadataChanged = function (ctx, channel, metadata) {
|
||||
if (channel && metadata_cache[channel] && typeof (metadata) === "object") {
|
||||
Log.silly('SET_METADATA_CACHE', 'Channel '+ channel +', metadata: '+ JSON.stringify(metadata));
|
||||
metadata_cache[channel] = metadata;
|
||||
if (ctx.channels[channel] && ctx.channels[channel].index) {
|
||||
ctx.channels[channel].index.metadata = metadata;
|
||||
}
|
||||
historyKeeperBroadcast(ctx, channel, metadata);
|
||||
const onChannelMetadataChanged = function (Server, channel, metadata) {
|
||||
if (!(channel && metadata_cache[channel] && typeof (metadata) === "object")) { return; }
|
||||
Log.silly('SET_METADATA_CACHE', {
|
||||
channel: channel,
|
||||
metadata: JSON.stringify(metadata),
|
||||
});
|
||||
|
||||
metadata_cache[channel] = metadata;
|
||||
|
||||
if (channel_cache[channel] && channel_cache[channel].index) {
|
||||
channel_cache[channel].index.metadata = metadata;
|
||||
}
|
||||
Server.channelBroadcast(channel, metadata, HISTORY_KEEPER_ID);
|
||||
};
|
||||
|
||||
const handleGetHistory = function (ctx, seq, user, parsed) {
|
||||
const handleGetHistory = function (Server, seq, userId, parsed) {
|
||||
// parsed[1] is the channel id
|
||||
// parsed[2] is a validation key or an object containing metadata (optional)
|
||||
// parsed[3] is the last known hash (optional)
|
||||
sendMsg(ctx, user, [seq, 'ACK']);
|
||||
|
||||
Server.send(userId, [seq, 'ACK']);
|
||||
var channelName = parsed[1];
|
||||
var config = parsed[2];
|
||||
var metadata = {};
|
||||
|
@ -771,7 +726,7 @@ module.exports.create = function (cfg) {
|
|||
unfortunately, we can't just serve it blindly, since then young channels will
|
||||
send the metadata twice, so let's do a quick check of what we're going to serve...
|
||||
*/
|
||||
getIndex(ctx, channelName, waitFor((err, index) => {
|
||||
getIndex(channelName, waitFor((err, index) => {
|
||||
/* if there's an error here, it should be encountered
|
||||
and handled by the next nThen block.
|
||||
so, let's just fall through...
|
||||
|
@ -785,32 +740,32 @@ module.exports.create = function (cfg) {
|
|||
if (!index || !index.metadata) { return void w(); }
|
||||
// And then check if the channel is expired. If it is, send the error and abort
|
||||
// FIXME this is hard to read because 'checkExpired' has side effects
|
||||
if (checkExpired(ctx, channelName)) { return void waitFor.abort(); }
|
||||
if (checkExpired(Server, channelName)) { return void waitFor.abort(); }
|
||||
// always send metadata with GET_HISTORY requests
|
||||
sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify(index.metadata)], w);
|
||||
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(index.metadata)], w);
|
||||
}));
|
||||
}).nThen(() => {
|
||||
let msgCount = 0;
|
||||
|
||||
// TODO compute lastKnownHash in a manner such that it will always skip past the metadata line?
|
||||
getHistoryAsync(ctx, channelName, lastKnownHash, false, (msg, readMore) => {
|
||||
if (!msg) { return; }
|
||||
getHistoryAsync(channelName, lastKnownHash, false, (msg, readMore) => {
|
||||
if (!msg) { return; } // XXX
|
||||
msgCount++;
|
||||
// avoid sending the metadata message a second time
|
||||
if (isMetadataMessage(msg) && metadata_cache[channelName]) { return readMore(); }
|
||||
if (txid) { msg[0] = txid; }
|
||||
sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify(msg)], readMore);
|
||||
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(msg)], readMore);
|
||||
}, (err) => {
|
||||
if (err && err.code !== 'ENOENT') {
|
||||
if (err.message !== 'EINVAL') { Log.error("HK_GET_HISTORY", err); }
|
||||
const parsedMsg = {error:err.message, channel: channelName, txid: txid};
|
||||
sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify(parsedMsg)]);
|
||||
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(parsedMsg)]);
|
||||
return;
|
||||
}
|
||||
|
||||
const chan = ctx.channels[channelName];
|
||||
const chan = channel_cache[channelName];
|
||||
|
||||
if (msgCount === 0 && !metadata_cache[channelName] && chan && chan.indexOf(user) > -1) {
|
||||
if (msgCount === 0 && !metadata_cache[channelName] && Server.channelContainsUser(channelName, userId)) {
|
||||
metadata_cache[channelName] = metadata;
|
||||
|
||||
// the index will have already been constructed and cached at this point
|
||||
|
@ -847,21 +802,23 @@ module.exports.create = function (cfg) {
|
|||
}
|
||||
});
|
||||
}
|
||||
sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify(metadata)]);
|
||||
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(metadata)]);
|
||||
}
|
||||
|
||||
// End of history message:
|
||||
let parsedMsg = {state: 1, channel: channelName, txid: txid};
|
||||
sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify(parsedMsg)]);
|
||||
|
||||
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(parsedMsg)]);
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
const handleGetHistoryRange = function (ctx, seq, user, parsed) {
|
||||
const handleGetHistoryRange = function (Server, seq, userId, parsed) {
|
||||
var channelName = parsed[1];
|
||||
var map = parsed[2];
|
||||
|
||||
if (!(map && typeof(map) === 'object')) {
|
||||
return void sendMsg(ctx, user, [seq, 'ERROR', 'INVALID_ARGS', HISTORY_KEEPER_ID]);
|
||||
return void Server.send(userId, [seq, 'ERROR', 'INVALID_ARGS', HISTORY_KEEPER_ID]);
|
||||
}
|
||||
|
||||
var oldestKnownHash = map.from;
|
||||
|
@ -869,14 +826,14 @@ module.exports.create = function (cfg) {
|
|||
var desiredCheckpoint = map.cpCount;
|
||||
var txid = map.txid;
|
||||
if (typeof(desiredMessages) !== 'number' && typeof(desiredCheckpoint) !== 'number') {
|
||||
return void sendMsg(ctx, user, [seq, 'ERROR', 'UNSPECIFIED_COUNT', HISTORY_KEEPER_ID]);
|
||||
return void Server.send(userId, [seq, 'ERROR', 'UNSPECIFIED_COUNT', HISTORY_KEEPER_ID]);
|
||||
}
|
||||
|
||||
if (!txid) {
|
||||
return void sendMsg(ctx, user, [seq, 'ERROR', 'NO_TXID', HISTORY_KEEPER_ID]);
|
||||
return void Server.send(userId, [seq, 'ERROR', 'NO_TXID', HISTORY_KEEPER_ID]);
|
||||
}
|
||||
|
||||
sendMsg(ctx, user, [seq, 'ACK']);
|
||||
Server.send(userId, [seq, 'ACK']);
|
||||
return void getOlderHistory(channelName, oldestKnownHash, function (messages) {
|
||||
var toSend = [];
|
||||
if (typeof (desiredMessages) === "number") {
|
||||
|
@ -892,98 +849,104 @@ module.exports.create = function (cfg) {
|
|||
}
|
||||
}
|
||||
toSend.forEach(function (msg) {
|
||||
sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id,
|
||||
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId,
|
||||
JSON.stringify(['HISTORY_RANGE', txid, msg])]);
|
||||
});
|
||||
|
||||
sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id,
|
||||
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId,
|
||||
JSON.stringify(['HISTORY_RANGE_END', txid, channelName])
|
||||
]);
|
||||
});
|
||||
};
|
||||
|
||||
const handleGetFullHistory = function (ctx, seq, user, parsed) {
|
||||
const handleGetFullHistory = function (Server, seq, userId, parsed) {
|
||||
// parsed[1] is the channel id
|
||||
// parsed[2] is a validation key (optional)
|
||||
// parsed[3] is the last known hash (optional)
|
||||
sendMsg(ctx, user, [seq, 'ACK']);
|
||||
|
||||
Server.send(userId, [seq, 'ACK']);
|
||||
|
||||
// FIXME should we send metadata here too?
|
||||
// none of the clientside code which uses this API needs metadata, but it won't hurt to send it (2019-08-22)
|
||||
return void getHistoryAsync(ctx, parsed[1], -1, false, (msg, readMore) => {
|
||||
return void getHistoryAsync(parsed[1], -1, false, (msg, readMore) => {
|
||||
if (!msg) { return; }
|
||||
sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify(['FULL_HISTORY', msg])], readMore);
|
||||
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(['FULL_HISTORY', msg])], readMore);
|
||||
}, (err) => {
|
||||
let parsedMsg = ['FULL_HISTORY_END', parsed[1]];
|
||||
if (err) {
|
||||
Log.error('HK_GET_FULL_HISTORY', err.stack);
|
||||
parsedMsg = ['ERROR', parsed[1], err.message];
|
||||
}
|
||||
sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify(parsedMsg)]);
|
||||
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(parsedMsg)]);
|
||||
});
|
||||
};
|
||||
|
||||
const handleRPC = function (ctx, seq, user, parsed) {
|
||||
const handleRPC = function (Server, seq, userId, parsed) {
|
||||
if (typeof(rpc) !== 'function') { return; }
|
||||
|
||||
/* RPC Calls... */
|
||||
var rpc_call = parsed.slice(1);
|
||||
|
||||
sendMsg(ctx, user, [seq, 'ACK']);
|
||||
Server.send(userId, [seq, 'ACK']);
|
||||
try {
|
||||
// slice off the sequence number and pass in the rest of the message
|
||||
rpc(ctx, rpc_call, function (err, output) {
|
||||
rpc(Server, rpc_call, function (err, output) {
|
||||
if (err) {
|
||||
sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify([parsed[0], 'ERROR', err])]);
|
||||
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify([parsed[0], 'ERROR', err])]);
|
||||
return;
|
||||
}
|
||||
var msg = rpc_call[0].slice();
|
||||
if (msg[3] === 'REMOVE_OWNED_CHANNEL') {
|
||||
onChannelDeleted(ctx, msg[4]);
|
||||
onChannelDeleted(Server, msg[4]);
|
||||
}
|
||||
if (msg[3] === 'CLEAR_OWNED_CHANNEL') {
|
||||
onChannelCleared(ctx, msg[4]);
|
||||
onChannelCleared(Server, msg[4]);
|
||||
}
|
||||
|
||||
if (msg[3] === 'SET_METADATA') { // or whatever we call the RPC????
|
||||
// make sure we update our cache of metadata
|
||||
// or at least invalidate it and force other mechanisms to recompute its state
|
||||
// 'output' could be the new state as computed by rpc
|
||||
onChannelMetadataChanged(ctx, msg[4].channel, output[1]);
|
||||
onChannelMetadataChanged(Server, msg[4].channel, output[1]);
|
||||
}
|
||||
|
||||
// unauthenticated RPC calls have a different message format
|
||||
if (msg[0] === "WRITE_PRIVATE_MESSAGE" && output && output.channel) {
|
||||
// this is an inline reimplementation of historyKeeperBroadcast
|
||||
// because if we use that directly it will bypass signature validation
|
||||
// which opens up the user to malicious behaviour
|
||||
let chan = ctx.channels[output.channel];
|
||||
if (chan && chan.length) {
|
||||
chan.forEach(function (user) {
|
||||
sendMsg(ctx, user, output.message);
|
||||
//[0, null, 'MSG', user.id, JSON.stringify(output.message)]);
|
||||
});
|
||||
}
|
||||
// clients don't validate messages sent by the historyKeeper
|
||||
// so this broadcast needs to come from a different id
|
||||
// we pass 'null' to indicate that it's not coming from a real user
|
||||
// to ensure that they know not to trust this message
|
||||
Server.getChannelUserList(output.channel).forEach(function (userId) {
|
||||
Server.send(userId, output.message);
|
||||
});
|
||||
|
||||
// rpc and anonRpc expect their responses to be of a certain length
|
||||
// and we've already used the output of the rpc call, so overwrite it
|
||||
output = [null, null, null];
|
||||
}
|
||||
|
||||
// finally, send a response to the client that sent the RPC
|
||||
sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify([parsed[0]].concat(output))]);
|
||||
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify([parsed[0]].concat(output))]);
|
||||
});
|
||||
} catch (e) {
|
||||
sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify([parsed[0], 'ERROR', 'SERVER_ERROR'])]);
|
||||
// if anything throws in the middle, send an error
|
||||
Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify([parsed[0], 'ERROR', 'SERVER_ERROR'])]);
|
||||
}
|
||||
};
|
||||
|
||||
const directMessageCommands = {
|
||||
GET_HISTORY: handleGetHistory,
|
||||
GET_HISTORY_RANGE: handleGetHistoryRange,
|
||||
GET_FULL_HISTORY: handleGetFullHistory,
|
||||
};
|
||||
|
||||
/* onDirectMessage
|
||||
* exported for use by the netflux-server
|
||||
* parses and handles all direct messages directed to the history keeper
|
||||
* check if it's expired and execute all the associated side-effects
|
||||
* routes queries to the appropriate handlers
|
||||
*/
|
||||
const onDirectMessage = function (ctx, seq, user, json) {
|
||||
const onDirectMessage = function (Server, seq, userId, json) {
|
||||
Log.silly('HK_MESSAGE', json);
|
||||
|
||||
let parsed;
|
||||
|
@ -997,28 +960,49 @@ module.exports.create = function (cfg) {
|
|||
// If the requested history is for an expired channel, abort
|
||||
// Note the if we don't have the keys for that channel in metadata_cache, we'll
|
||||
// have to abort later (once we know the expiration time)
|
||||
if (checkExpired(ctx, parsed[1])) { return; }
|
||||
if (checkExpired(Server, parsed[1])) { return; }
|
||||
|
||||
if (parsed[0] === 'GET_HISTORY') {
|
||||
return void handleGetHistory(ctx, seq, user, parsed);
|
||||
}
|
||||
if (parsed[0] === 'GET_HISTORY_RANGE') {
|
||||
return void handleGetHistoryRange(ctx, seq, user, parsed);
|
||||
}
|
||||
if (parsed[0] === 'GET_FULL_HISTORY') {
|
||||
return void handleGetFullHistory(ctx, seq, user, parsed);
|
||||
}
|
||||
return void handleRPC(ctx, seq, user, parsed);
|
||||
// look up the appropriate command in the map of commands or fall back to RPC
|
||||
var command = directMessageCommands[parsed[0]] || handleRPC;
|
||||
|
||||
// run the command with the standard function signature
|
||||
command(Server, seq, userId, parsed);
|
||||
};
|
||||
|
||||
return {
|
||||
cfg.historyKeeper = {
|
||||
id: HISTORY_KEEPER_ID,
|
||||
setConfig: setConfig,
|
||||
onChannelMessage: onChannelMessage,
|
||||
dropChannel: dropChannel,
|
||||
checkExpired: checkExpired,
|
||||
onDirectMessage: onDirectMessage,
|
||||
};
|
||||
};
|
||||
|
||||
}());
|
||||
channelMessage: function (Server, channel, msgStruct) {
|
||||
// netflux-server emits 'channelMessage' events whenever someone broadcasts to a channel
|
||||
// historyKeeper stores these messages if the channel id indicates that they are
|
||||
// a channel type with permanent history
|
||||
onChannelMessage(Server, channel, msgStruct);
|
||||
},
|
||||
channelClose: function (channelName) {
|
||||
// netflux-server emits 'channelClose' events whenever everyone leaves a channel
|
||||
// we drop cached metadata and indexes at the same time
|
||||
dropChannel(channelName);
|
||||
},
|
||||
channelOpen: function (Server, channelName, userId) {
|
||||
channel_cache[channelName] = {};
|
||||
Server.send(userId, [
|
||||
0,
|
||||
HISTORY_KEEPER_ID,
|
||||
'JOIN',
|
||||
channelName
|
||||
]);
|
||||
},
|
||||
directMessage: function (Server, seq, userId, json) {
|
||||
// netflux-server allows you to register an id with a handler
|
||||
// this handler is invoked every time someone sends a message to that id
|
||||
onDirectMessage(Server, seq, userId, json);
|
||||
},
|
||||
};
|
||||
|
||||
RPC.create(cfg, function (err, _rpc) {
|
||||
if (err) { throw err; }
|
||||
|
||||
rpc = _rpc;
|
||||
cb(void 0, cfg.historyKeeper);
|
||||
});
|
||||
};
|
|
@ -0,0 +1,33 @@
|
|||
var HK = module.exports;
|
||||
|
||||
/* getHash
|
||||
* this function slices off the leading portion of a message which is
|
||||
most likely unique
|
||||
* these "hashes" are used to identify particular messages in a channel's history
|
||||
* clients store "hashes" either in memory or in their drive to query for new messages:
|
||||
* when reconnecting to a pad
|
||||
* when connecting to chat or a mailbox
|
||||
* thus, we can't change this function without invalidating client data which:
|
||||
* is encrypted clientside
|
||||
* can't be easily migrated
|
||||
* don't break it!
|
||||
*/
|
||||
HK.getHash = function (msg, Log) {
|
||||
if (typeof(msg) !== 'string') {
|
||||
if (Log) {
|
||||
Log.warn('HK_GET_HASH', 'getHash() called on ' + typeof(msg) + ': ' + msg);
|
||||
}
|
||||
return '';
|
||||
}
|
||||
return msg.slice(0,64);
|
||||
};
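
For illustration only; the sample inputs below are made up, but the 64-character prefix length comes from the function above.

HK.getHash('0123456789'.repeat(10));    // => the first 64 characters of the message
HK.getHash(null, console);              // => '' (and a warning through the supplied logger)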
|
||||
|
||||
// historyKeeper should explicitly store any channel
|
||||
// with a 32 character id
|
||||
HK.STANDARD_CHANNEL_LENGTH = 32;
|
||||
|
||||
// historyKeeper should not store messages sent to any channel
|
||||
// with a 34 character id
|
||||
HK.EPHEMERAL_CHANNEL_LENGTH = 34;
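
A small sketch of how a consumer is expected to use these constants; the helper name is hypothetical and not part of this patch.

var shouldStoreHistory = function (channelId) {
    if (typeof(channelId) !== 'string') { return false; }
    if (channelId.length === HK.EPHEMERAL_CHANNEL_LENGTH) { return false; }
    return channelId.length === HK.STANDARD_CHANNEL_LENGTH;
};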
|
||||
|
||||
|
|
@ -211,12 +211,14 @@ Meta.createLineHandler = function (ref, errorHandler) {
|
|||
line: JSON.stringify(line),
|
||||
});
|
||||
}
|
||||
|
||||
// the case above is special, everything else should increment the index
|
||||
var index = ref.index++;
|
||||
if (typeof(line) === 'undefined') { return; }
|
||||
|
||||
if (Array.isArray(line)) {
|
||||
try {
|
||||
handleCommand(ref.meta, line);
|
||||
ref.index++;
|
||||
} catch (err2) {
|
||||
errorHandler("METADATA_COMMAND_ERR", {
|
||||
error: err2.stack,
|
||||
|
@ -226,8 +228,15 @@ Meta.createLineHandler = function (ref, errorHandler) {
|
|||
return;
|
||||
}
|
||||
|
||||
if (ref.index === 0 && typeof(line) === 'object') {
|
||||
ref.index++;
|
||||
// the first line of a channel is processed before the dedicated metadata log.
|
||||
// it can contain a map, in which case it should be used as the initial state.
|
||||
// it's possible that a trim-history command was interrupted, in which case
|
||||
// this first message might exist in parallel with the more recent metadata log
|
||||
// which will contain the computed state of the previous metadata log
|
||||
// which has since been archived.
|
||||
// Thus, accept both the first and second lines you process as valid initial state
|
||||
// preferring the second if it exists
|
||||
if (index < 2 && line && typeof(line) === 'object') {
|
||||
// special case!
|
||||
ref.meta = line;
|
||||
return;
|
||||
|
@ -235,7 +244,7 @@ Meta.createLineHandler = function (ref, errorHandler) {
|
|||
|
||||
errorHandler("METADATA_HANDLER_WEIRDLINE", {
|
||||
line: line,
|
||||
index: ref.index++,
|
||||
index: index,
|
||||
});
|
||||
};
|
||||
};
|
||||
|
|
|
@ -0,0 +1,399 @@
|
|||
/*jshint esversion: 6 */
|
||||
const nThen = require("nthen");
|
||||
|
||||
const Util = require("./common-util");
|
||||
const mkEvent = Util.mkEvent;
|
||||
|
||||
const Core = require("./commands/core");
|
||||
const Admin = require("./commands/admin-rpc");
|
||||
const Pinning = require("./commands/pin-rpc");
|
||||
const Quota = require("./commands/quota");
|
||||
const Block = require("./commands/block");
|
||||
const Metadata = require("./commands/metadata");
|
||||
const Channel = require("./commands/channel");
|
||||
const Upload = require("./commands/upload");
|
||||
|
||||
var RPC = module.exports;
|
||||
|
||||
const Store = require("../storage/file");
|
||||
const BlobStore = require("../storage/blob");
|
||||
|
||||
const UNAUTHENTICATED_CALLS = [
|
||||
'GET_FILE_SIZE',
|
||||
'GET_METADATA',
|
||||
'GET_MULTIPLE_FILE_SIZE',
|
||||
'IS_CHANNEL_PINNED',
|
||||
'IS_NEW_CHANNEL',
|
||||
'GET_DELETED_PADS',
|
||||
'WRITE_PRIVATE_MESSAGE',
|
||||
];
|
||||
|
||||
var isUnauthenticatedCall = function (call) {
|
||||
return UNAUTHENTICATED_CALLS.indexOf(call) !== -1;
|
||||
};
|
||||
|
||||
const AUTHENTICATED_CALLS = [
|
||||
'COOKIE',
|
||||
'RESET',
|
||||
'PIN',
|
||||
'UNPIN',
|
||||
'GET_HASH',
|
||||
'GET_TOTAL_SIZE',
|
||||
'UPDATE_LIMITS',
|
||||
'GET_LIMIT',
|
||||
'UPLOAD_STATUS',
|
||||
'UPLOAD_COMPLETE',
|
||||
'OWNED_UPLOAD_COMPLETE',
|
||||
'UPLOAD_CANCEL',
|
||||
'EXPIRE_SESSION',
|
||||
'TRIM_OWNED_CHANNEL_HISTORY',
|
||||
'CLEAR_OWNED_CHANNEL',
|
||||
'REMOVE_OWNED_CHANNEL',
|
||||
'REMOVE_PINS',
|
||||
'TRIM_PINS',
|
||||
'WRITE_LOGIN_BLOCK',
|
||||
'REMOVE_LOGIN_BLOCK',
|
||||
'ADMIN',
|
||||
'SET_METADATA'
|
||||
];
|
||||
|
||||
var isAuthenticatedCall = function (call) {
|
||||
return AUTHENTICATED_CALLS.indexOf(call) !== -1;
|
||||
};
|
||||
|
||||
var isUnauthenticatedMessage = function (msg) {
|
||||
return msg && msg.length === 2 && isUnauthenticatedCall(msg[0]);
|
||||
};
|
||||
|
||||
var handleUnauthenticatedMessage = function (Env, msg, respond, Server) {
|
||||
Env.Log.silly('LOG_RPC', msg[0]);
|
||||
switch (msg[0]) {
|
||||
case 'GET_FILE_SIZE':
|
||||
return void Pinning.getFileSize(Env, msg[1], function (e, size) {
|
||||
Env.WARN(e, msg[1]);
|
||||
respond(e, [null, size, null]);
|
||||
});
|
||||
case 'GET_METADATA':
|
||||
return void Metadata.getMetadata(Env, msg[1], function (e, data) {
|
||||
Env.WARN(e, msg[1]);
|
||||
respond(e, [null, data, null]);
|
||||
});
|
||||
case 'GET_MULTIPLE_FILE_SIZE': // XXX not actually used on the client?
|
||||
return void Pinning.getMultipleFileSize(Env, msg[1], function (e, dict) {
|
||||
if (e) {
|
||||
Env.WARN(e, dict);
|
||||
return respond(e);
|
||||
}
|
||||
respond(e, [null, dict, null]);
|
||||
});
|
||||
case 'GET_DELETED_PADS':
|
||||
return void Pinning.getDeletedPads(Env, msg[1], function (e, list) {
|
||||
if (e) {
|
||||
Env.WARN(e, msg[1]);
|
||||
return respond(e);
|
||||
}
|
||||
respond(e, [null, list, null]);
|
||||
});
|
||||
case 'IS_CHANNEL_PINNED':
|
||||
return void Pinning.isChannelPinned(Env, msg[1], function (isPinned) {
|
||||
respond(null, [null, isPinned, null]);
|
||||
});
|
||||
case 'IS_NEW_CHANNEL':
|
||||
return void Channel.isNewChannel(Env, msg[1], function (e, isNew) {
|
||||
respond(e, [null, isNew, null]);
|
||||
});
|
||||
case 'WRITE_PRIVATE_MESSAGE':
|
||||
return void Channel.writePrivateMessage(Env, msg[1], Server, function (e, output) {
|
||||
respond(e, output);
|
||||
});
|
||||
default:
|
||||
Env.Log.warn("UNSUPPORTED_RPC_CALL", msg);
|
||||
return respond('UNSUPPORTED_RPC_CALL', msg);
|
||||
}
|
||||
};
|
||||
|
||||
const AUTHENTICATED_USER_TARGETED = {
|
||||
RESET: Pinning.resetUserPins,
|
||||
PIN: Pinning.pinChannel,
|
||||
UNPIN: Pinning.unpinChannel,
|
||||
CLEAR_OWNED_CHANNEL: Channel.clearOwnedChannel,
|
||||
REMOVE_OWNED_CHANNEL: Channel.removeOwnedChannel,
|
||||
UPLOAD_STATUS: Upload.status,
|
||||
UPLOAD: Upload.upload,
|
||||
UPLOAD_COMPLETE: Upload.complete,
|
||||
UPLOAD_CANCEL: Upload.cancel,
|
||||
OWNED_UPLOAD_COMPLETE: Upload.complete_owned,
|
||||
};
|
||||
|
||||
const AUTHENTICATED_USER_SCOPED = {
|
||||
GET_HASH: Pinning.getHash,
|
||||
GET_TOTAL_SIZE: Pinning.getTotalSize,
|
||||
UPDATE_LIMITS: Quota.updateLimits,
|
||||
GET_LIMIT: Pinning.getLimit,
|
||||
EXPIRE_SESSION: Core.expireSessionAsync,
|
||||
REMOVE_PINS: Pinning.removePins,
|
||||
TRIM_PINS: Pinning.trimPins,
|
||||
SET_METADATA: Metadata.setMetadata,
|
||||
};
|
||||
|
||||
var handleAuthenticatedMessage = function (Env, map) {
|
||||
var msg = map.msg;
|
||||
var safeKey = map.safeKey;
|
||||
var publicKey = map.publicKey;
|
||||
var Respond = map.Respond;
|
||||
var Server = map.Server;
|
||||
|
||||
var TYPE = msg[0];
|
||||
|
||||
Env.Log.silly('LOG_RPC', TYPE);
|
||||
|
||||
if (typeof(AUTHENTICATED_USER_TARGETED[TYPE]) === 'function') {
|
||||
return void AUTHENTICATED_USER_TARGETED[TYPE](Env, safeKey, msg[1], function (e, value) {
|
||||
Env.WARN(e, value);
|
||||
return void Respond(e, value);
|
||||
});
|
||||
}
|
||||
|
||||
if (typeof(AUTHENTICATED_USER_SCOPED[TYPE]) === 'function') {
|
||||
return void AUTHENTICATED_USER_SCOPED[TYPE](Env, safeKey, function (e, value) {
|
||||
if (e) {
|
||||
Env.WARN(e, safeKey);
|
||||
return void Respond(e);
|
||||
}
|
||||
Respond(e, value);
|
||||
});
|
||||
}
|
||||
|
||||
switch (msg[0]) {
|
||||
case 'COOKIE': return void Respond(void 0);
|
||||
case 'TRIM_OWNED_CHANNEL_HISTORY':
|
||||
return void Channel.removeOwnedChannelHistory(Env, msg[1], publicKey, msg[2], function (e) { // XXX USER_TARGETED_DOUBLE
|
||||
if (e) { return void Respond(e); }
|
||||
Respond(void 0, 'OK');
|
||||
});
|
||||
case 'WRITE_LOGIN_BLOCK':
|
||||
return void Block.writeLoginBlock(Env, msg[1], function (e) { // XXX SPECIAL
|
||||
if (e) {
|
||||
Env.WARN(e, 'WRITE_LOGIN_BLOCK');
|
||||
return void Respond(e);
|
||||
}
|
||||
Respond(e);
|
||||
});
|
||||
case 'REMOVE_LOGIN_BLOCK':
|
||||
return void Block.removeLoginBlock(Env, msg[1], function (e) { // XXX SPECIAL
|
||||
if (e) {
|
||||
Env.WARN(e, 'REMOVE_LOGIN_BLOCK');
|
||||
return void Respond(e);
|
||||
}
|
||||
Respond(e);
|
||||
});
|
||||
case 'ADMIN':
|
||||
return void Admin.command(Env, Server, safeKey, msg[1], function (e, result) { // XXX SPECIAL
|
||||
if (e) {
|
||||
Env.WARN(e, result);
|
||||
return void Respond(e);
|
||||
}
|
||||
Respond(void 0, result);
|
||||
});
|
||||
default:
|
||||
Env.Log.warn("UNSUPPORTED_RPC_CALL", msg);
|
||||
return void Respond('UNSUPPORTED_RPC_CALL', msg);
|
||||
}
|
||||
};
|
||||
|
||||
var rpc = function (Env, Server, data, respond) {
|
||||
if (!Array.isArray(data)) {
|
||||
Env.Log.debug('INVALID_ARG_FORMAT', data);
|
||||
return void respond('INVALID_ARG_FORMAT');
|
||||
}
|
||||
|
||||
if (!data.length) {
|
||||
return void respond("INSUFFICIENT_ARGS");
|
||||
} else if (data.length !== 1) {
|
||||
Env.Log.debug('UNEXPECTED_ARGUMENTS_LENGTH', data);
|
||||
}
|
||||
|
||||
var msg = data[0].slice(0);
|
||||
|
||||
if (!Array.isArray(msg)) {
|
||||
return void respond('INVALID_ARG_FORMAT');
|
||||
}
|
||||
|
||||
if (isUnauthenticatedMessage(msg)) {
|
||||
return handleUnauthenticatedMessage(Env, msg, respond, Server);
|
||||
}
|
||||
|
||||
var signature = msg.shift();
|
||||
var publicKey = msg.shift();
|
||||
|
||||
// make sure a user object is initialized in the cookie jar
|
||||
if (publicKey) {
|
||||
Core.getSession(Env.Sessions, publicKey);
|
||||
} else {
|
||||
Env.Log.debug("NO_PUBLIC_KEY_PROVIDED", publicKey);
|
||||
}
|
||||
|
||||
var cookie = msg[0];
|
||||
if (!Core.isValidCookie(Env.Sessions, publicKey, cookie)) {
|
||||
// no cookie is fine if the RPC is to get a cookie
|
||||
if (msg[1] !== 'COOKIE') {
|
||||
return void respond('NO_COOKIE');
|
||||
}
|
||||
}
|
||||
|
||||
var serialized = JSON.stringify(msg);
|
||||
|
||||
if (!(serialized && typeof(publicKey) === 'string')) {
|
||||
return void respond('INVALID_MESSAGE_OR_PUBLIC_KEY');
|
||||
}
|
||||
|
||||
if (isAuthenticatedCall(msg[1])) {
|
||||
if (Core.checkSignature(Env, serialized, signature, publicKey) !== true) {
|
||||
return void respond("INVALID_SIGNATURE_OR_PUBLIC_KEY");
|
||||
}
|
||||
} else if (msg[1] !== 'UPLOAD') {
|
||||
Env.Log.warn('INVALID_RPC_CALL', msg[1]);
|
||||
return void respond("INVALID_RPC_CALL");
|
||||
}
|
||||
|
||||
var safeKey = Util.escapeKeyCharacters(publicKey);
|
||||
/* If you have gotten this far, you have signed the message with the
|
||||
public key which you provided.
|
||||
|
||||
We can safely modify the state for that key
|
||||
|
||||
OR it's an unauthenticated call, which must not modify the state
|
||||
for that key in a meaningful way.
|
||||
*/
|
||||
|
||||
// discard validated cookie from message
|
||||
msg.shift();
|
||||
|
||||
var Respond = function (e, msg) {
|
||||
var session = Env.Sessions[safeKey];
|
||||
var token = session? session.tokens.slice(-1)[0]: '';
|
||||
var cookie = Core.makeCookie(token).join('|');
|
||||
respond(e ? String(e): e, [cookie].concat(typeof(msg) !== 'undefined' ?msg: []));
|
||||
};
|
||||
|
||||
if (typeof(msg) !== 'object' || !msg.length) {
|
||||
return void Respond('INVALID_MSG');
|
||||
}
|
||||
|
||||
handleAuthenticatedMessage(Env, {
|
||||
msg: msg,
|
||||
safeKey: safeKey,
|
||||
publicKey: publicKey,
|
||||
Respond: Respond,
|
||||
Server: Server,
|
||||
});
|
||||
};
|
||||
|
||||
RPC.create = function (config, cb) {
|
||||
var Log = config.log;
|
||||
|
||||
// load pin-store...
|
||||
Log.silly('LOADING RPC MODULE');
|
||||
|
||||
var keyOrDefaultString = function (key, def) {
|
||||
return typeof(config[key]) === 'string'? config[key]: def;
|
||||
};
|
||||
|
||||
var WARN = function (e, output) {
|
||||
if (e && output) {
|
||||
Log.warn(e, {
|
||||
output: output,
|
||||
message: String(e),
|
||||
stack: new Error(e).stack,
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
var Env = {
|
||||
historyKeeper: config.historyKeeper,
|
||||
intervals: config.intervals || {},
|
||||
defaultStorageLimit: config.defaultStorageLimit,
|
||||
maxUploadSize: config.maxUploadSize || (20 * 1024 * 1024),
|
||||
Sessions: {},
|
||||
paths: {},
|
||||
msgStore: config.store,
|
||||
pinStore: undefined,
|
||||
pinnedPads: {},
|
||||
evPinnedPadsReady: mkEvent(true),
|
||||
limits: {},
|
||||
admins: [],
|
||||
Log: Log,
|
||||
WARN: WARN,
|
||||
flushCache: config.flushCache,
|
||||
adminEmail: config.adminEmail,
|
||||
allowSubscriptions: config.allowSubscriptions,
|
||||
myDomain: config.myDomain,
|
||||
mySubdomain: config.mySubdomain,
|
||||
customLimits: config.customLimits,
|
||||
domain: config.domain // XXX
|
||||
};
|
||||
|
||||
try {
|
||||
Env.admins = (config.adminKeys || []).map(function (k) {
|
||||
k = k.replace(/\/+$/, '');
|
||||
var s = k.split('/');
|
||||
return s[s.length-1];
|
||||
});
|
||||
} catch (e) {
|
||||
console.error("Can't parse admin keys. Please update or fix your config.js file!");
|
||||
}
|
||||
|
||||
var Sessions = Env.Sessions;
|
||||
var paths = Env.paths;
|
||||
var pinPath = paths.pin = keyOrDefaultString('pinPath', './pins');
|
||||
paths.block = keyOrDefaultString('blockPath', './block');
|
||||
paths.data = keyOrDefaultString('filePath', './datastore');
|
||||
paths.staging = keyOrDefaultString('blobStagingPath', './blobstage');
|
||||
paths.blob = keyOrDefaultString('blobPath', './blob');
|
||||
|
||||
var updateLimitDaily = function () {
|
||||
Quota.updateLimits(Env, undefined, function (e) {
|
||||
if (e) {
|
||||
WARN('limitUpdate', e);
|
||||
}
|
||||
});
|
||||
};
|
||||
Quota.applyCustomLimits(Env);
|
||||
updateLimitDaily();
|
||||
Env.intervals.dailyLimitUpdate = setInterval(updateLimitDaily, 24*3600*1000);
|
||||
|
||||
Pinning.loadChannelPins(Env);
|
||||
|
||||
nThen(function (w) {
|
||||
Store.create({
|
||||
filePath: pinPath,
|
||||
}, w(function (s) {
|
||||
Env.pinStore = s;
|
||||
}));
|
||||
BlobStore.create({
|
||||
blobPath: config.blobPath,
|
||||
blobStagingPath: config.blobStagingPath,
|
||||
archivePath: config.archivePath,
|
||||
getSession: function (safeKey) {
|
||||
return Core.getSession(Sessions, safeKey);
|
||||
},
|
||||
}, w(function (err, blob) {
|
||||
if (err) { throw new Error(err); }
|
||||
Env.blobStore = blob;
|
||||
}));
|
||||
}).nThen(function () {
|
||||
cb(void 0, function (Server, data, respond) {
|
||||
try {
|
||||
return rpc(Env, Server, data, respond);
|
||||
} catch (e) {
|
||||
console.log("Error from RPC with data " + JSON.stringify(data));
|
||||
console.log(e.stack);
|
||||
}
|
||||
});
|
||||
// expire old sessions once per minute
|
||||
Env.intervals.sessionExpirationInterval = setInterval(function () {
|
||||
Core.expireSessions(Sessions);
|
||||
}, Core.SESSION_EXPIRATION_TIME);
|
||||
});
|
||||
};
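
RPC.create hands back (via its callback) the entry point that the history keeper earlier in this patch wires up. A hedged direct call might look like the sketch below, with 'Server' and 'channelId' assumed to be supplied by the caller and the framing reduced to a single unauthenticated command.

RPC.create(config, function (err, rpc) {
    if (err) { throw err; }
    // unauthenticated calls are wrapped as [[COMMAND, argument]]
    rpc(Server, [['GET_FILE_SIZE', channelId]], function (e, response) {
        if (e) { return void console.error(e); }
        console.log('file size:', response[1]); // response is [null, size, null]
    });
});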
|
|
@ -0,0 +1,172 @@
|
|||
var WriteQueue = require("./write-queue");
|
||||
var Util = require("./common-util");
|
||||
|
||||
/* This module implements a FIFO scheduler
|
||||
which assumes the existence of three types of async tasks:
|
||||
|
||||
1. ordered tasks which must be executed sequentially
|
||||
2. unordered tasks which can be executed in parallel
|
||||
3. blocking tasks which must block the execution of all other tasks
|
||||
|
||||
The scheduler assumes there will be many resources identified by strings,
|
||||
and that the constraints described above will only apply in the context
|
||||
of identical string ids.
|
||||
|
||||
Many blocking tasks may be executed in parallel so long as they
|
||||
concern resources identified by different ids.
|
||||
|
||||
USAGE:
|
||||
|
||||
const schedule = require("./schedule")();
|
||||
|
||||
// schedule two sequential tasks using the resource 'pewpew'
|
||||
schedule.ordered('pewpew', function (next) {
|
||||
appendToFile('beep\n', next);
|
||||
});
|
||||
schedule.ordered('pewpew', function (next) {
|
||||
appendToFile('boop\n', next);
|
||||
});
|
||||
|
||||
// schedule a task that can happen whenever
|
||||
schedule.unordered('pewpew', function (next) {
|
||||
displayFileSize(next);
|
||||
});
|
||||
|
||||
// schedule a blocking task which will wait
|
||||
// until all running unordered tasks have completed before commencing
|
||||
schedule.blocking('pewpew', function (next) {
|
||||
deleteFile(next);
|
||||
});
|
||||
|
||||
// this will be queued for after the blocking task
|
||||
schedule.ordered('pewpew', function (next) {
|
||||
appendToFile('boom\n', next);
|
||||
});
|
||||
|
||||
*/
|
||||
|
||||
// return a uid which is not already in a map
|
||||
var unusedUid = function (set) {
|
||||
var uid = Util.uid();
|
||||
if (set[uid]) { return unusedUid(set); }
|
||||
return uid;
|
||||
};
|
||||
|
||||
// return an existing session, creating one if it does not already exist
|
||||
var lookup = function (map, id) {
|
||||
return (map[id] = map[id] || {
|
||||
//blocking: [],
|
||||
active: {},
|
||||
blocked: {},
|
||||
});
|
||||
};
|
||||
|
||||
var isEmpty = function (map) {
|
||||
for (var key in map) {
|
||||
if (map.hasOwnProperty(key)) { return false; }
|
||||
}
|
||||
return true;
|
||||
};
|
||||
|
||||
module.exports = function () {
|
||||
// every scheduler instance has its own queue
|
||||
var queue = WriteQueue();
|
||||
|
||||
// ordered tasks don't require any extra logic
|
||||
var Ordered = function (id, task) {
|
||||
queue(id, task);
|
||||
};
|
||||
|
||||
// unordered and blocking tasks need a little extra state
|
||||
var map = {};
|
||||
|
||||
// regular garbage collection keeps memory consumption low
|
||||
var collectGarbage = function (id) {
|
||||
// avoid using 'lookup' since it creates a session implicitly
|
||||
var local = map[id];
|
||||
// bail out if no session
|
||||
if (!local) { return; }
|
||||
// bail out if there are blocking or active tasks
|
||||
if (local.lock) { return; }
|
||||
if (!isEmpty(local.active)) { return; }
|
||||
// if there are no pending actions then delete the session
|
||||
delete map[id];
|
||||
};
|
||||
|
||||
// unordered tasks run immediately if there are no blocking tasks scheduled
|
||||
// or immediately after blocking tasks finish
|
||||
var runImmediately = function (local, task) {
|
||||
// set a flag in the map of active unordered tasks
|
||||
// to prevent blocking tasks from running until you finish
|
||||
var uid = unusedUid(local.active);
|
||||
local.active[uid] = true;
|
||||
|
||||
task(function () {
|
||||
// remove the flag you set to indicate that your task completed
|
||||
delete local.active[uid];
|
||||
// don't do anything if other unordered tasks are still running
|
||||
if (!isEmpty(local.active)) { return; }
|
||||
// bail out if there are no blocking tasks scheduled or ready
|
||||
if (typeof(local.waiting) !== 'function') {
|
||||
return void collectGarbage();
|
||||
}
|
||||
setTimeout(local.waiting);
|
||||
});
|
||||
};
|
||||
|
||||
var runOnceUnblocked = function (local, task) {
|
||||
var uid = unusedUid(local.blocked);
|
||||
local.blocked[uid] = function () {
|
||||
runImmediately(local, task);
|
||||
};
|
||||
};
|
||||
|
||||
// 'unordered' tasks are scheduled to run after the most recently received blocking task
|
||||
// or immediately and in parallel if there are no blocking tasks scheduled.
|
||||
var Unordered = function (id, task) {
|
||||
var local = lookup(map, id);
|
||||
if (local.lock) { return runOnceUnblocked(local, task); }
|
||||
runImmediately(local, task);
|
||||
};
|
||||
|
||||
var runBlocked = function (local) {
|
||||
for (var task in local.blocked) {
|
||||
runImmediately(local, local.blocked[task]);
|
||||
}
|
||||
};
|
||||
|
||||
// 'blocking' tasks must be run alone.
|
||||
// They are queued alongside ordered tasks,
|
||||
// and wait until any running 'unordered' tasks complete before commencing.
|
||||
var Blocking = function (id, task) {
|
||||
var local = lookup(map, id);
|
||||
|
||||
queue(id, function (next) {
|
||||
// start right away if there are no running unordered tasks
|
||||
if (isEmpty(local.active)) {
|
||||
local.lock = true;
|
||||
return void task(function () {
|
||||
delete local.lock;
|
||||
runBlocked(local);
|
||||
next();
|
||||
});
|
||||
}
|
||||
// otherwise wait until the running tasks have completed
|
||||
local.waiting = function () {
|
||||
local.lock = true;
|
||||
task(function () {
|
||||
delete local.lock;
|
||||
delete local.waiting;
|
||||
runBlocked(local);
|
||||
next();
|
||||
});
|
||||
};
|
||||
});
|
||||
};
|
||||
|
||||
return {
|
||||
ordered: Ordered,
|
||||
unordered: Unordered,
|
||||
blocking: Blocking,
|
||||
};
|
||||
};
|
|
@ -113,14 +113,13 @@
|
|||
}
|
||||
},
|
||||
"chainpad-server": {
|
||||
"version": "3.0.5",
|
||||
"resolved": "https://registry.npmjs.org/chainpad-server/-/chainpad-server-3.0.5.tgz",
|
||||
"integrity": "sha512-USKOMSHsNjnme81Qy3nQ+ji9eCkBPokYH4T82LVHAI0aayTSCXcTPUDLVGDBCRqe8NsXU4io1WPXn1KiZwB8fA==",
|
||||
"version": "4.0.1",
|
||||
"resolved": "https://registry.npmjs.org/chainpad-server/-/chainpad-server-4.0.1.tgz",
|
||||
"integrity": "sha512-duV57hO0o2cKaOwwWdDeO3hgN2thAqoQENrjozhamGrUjF9bFiNW2cq3Dg3HjOY6yeMNIGgj0jMuLJjTSERKhQ==",
|
||||
"requires": {
|
||||
"nthen": "^0.1.8",
|
||||
"nthen": "0.1.8",
|
||||
"pull-stream": "^3.6.9",
|
||||
"stream-to-pull-stream": "^1.7.3",
|
||||
"tweetnacl": "~0.12.2",
|
||||
"ws": "^3.3.1"
|
||||
}
|
||||
},
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
},
|
||||
"dependencies": {
|
||||
"chainpad-crypto": "^0.2.2",
|
||||
"chainpad-server": "^3.0.5",
|
||||
"chainpad-server": "^4.0.0",
|
||||
"express": "~4.16.0",
|
||||
"fs-extra": "^7.0.0",
|
||||
"get-folder-size": "^2.0.1",
|
||||
|
@ -40,6 +40,7 @@
|
|||
"package": "PACKAGE=1 node server.js",
|
||||
"lint": "jshint --config .jshintrc --exclude-path .jshintignore . && ./node_modules/lesshint/bin/lesshint -c ./.lesshintrc ./customize.dist/src/less2/",
|
||||
"lint:js": "jshint --config .jshintrc --exclude-path .jshintignore .",
|
||||
"lint:server": "jshint --config .jshintrc lib",
|
||||
"lint:less": "./node_modules/lesshint/bin/lesshint -c ./.lesshintrc ./customize.dist/src/less2/",
|
||||
"flow": "./node_modules/.bin/flow",
|
||||
"test": "node scripts/TestSelenium.js",
|
||||
|
|
|
@ -15,8 +15,6 @@ var inactiveTime = +new Date() - (config.inactiveTime * 24 * 3600 * 1000);
|
|||
// files which were archived before this date can be considered safe to remove
|
||||
var retentionTime = +new Date() - (config.archiveRetentionTime * 24 * 3600 * 1000);
|
||||
|
||||
var retainData = Boolean(config.retainData);
|
||||
|
||||
var getNewestTime = function (stats) {
|
||||
return stats[['atime', 'ctime', 'mtime'].reduce(function (a, b) {
|
||||
return stats[b] > stats[a]? b: a;
|
||||
|
@ -176,23 +174,6 @@ nThen(function (w) {
|
|||
if (pins[item.blobId]) { return void next(); }
|
||||
if (item && getNewestTime(item) > retentionTime) { return void next(); }
|
||||
|
||||
if (!retainData) {
|
||||
return void blobs.remove.blob(item.blobId, function (err) {
|
||||
if (err) {
|
||||
Log.error("EVICT_BLOB_ERROR", {
|
||||
error: err,
|
||||
item: item,
|
||||
});
|
||||
return void next();
|
||||
}
|
||||
Log.info("EVICT_BLOB_INACTIVE", {
|
||||
item: item,
|
||||
});
|
||||
removed++;
|
||||
next();
|
||||
});
|
||||
}
|
||||
|
||||
blobs.archive.blob(item.blobId, function (err) {
|
||||
if (err) {
|
||||
Log.error("EVICT_ARCHIVE_BLOB_ERROR", {
|
||||
|
@ -247,7 +228,6 @@ nThen(function (w) {
|
|||
Log.info("EVICT_BLOB_PROOFS_REMOVED", removed);
|
||||
}));
|
||||
}).nThen(function (w) {
|
||||
var removed = 0;
|
||||
var channels = 0;
|
||||
var archived = 0;
|
||||
|
||||
|
@ -279,42 +259,22 @@ nThen(function (w) {
|
|||
// ignore the channel if it's pinned
|
||||
if (pins[item.channel]) { return void cb(); }
|
||||
|
||||
// if the server is configured to retain data, archive the channel
|
||||
if (config.retainData) {
|
||||
return void store.archiveChannel(item.channel, w(function (err) {
|
||||
if (err) {
|
||||
Log.error('EVICT_CHANNEL_ARCHIVAL_ERROR', {
|
||||
error: err,
|
||||
channel: item.channel,
|
||||
});
|
||||
return void cb();
|
||||
}
|
||||
Log.info('EVICT_CHANNEL_ARCHIVAL', item.channel);
|
||||
archived++;
|
||||
cb();
|
||||
}));
|
||||
}
|
||||
|
||||
// otherwise remove it
|
||||
store.removeChannel(item.channel, w(function (err) {
|
||||
return void store.archiveChannel(item.channel, w(function (err) {
|
||||
if (err) {
|
||||
Log.error('EVICT_CHANNEL_REMOVAL_ERROR', {
|
||||
Log.error('EVICT_CHANNEL_ARCHIVAL_ERROR', {
|
||||
error: err,
|
||||
channel: item.channel,
|
||||
});
|
||||
return void cb();
|
||||
}
|
||||
Log.info('EVICT_CHANNEL_REMOVAL', item.channel);
|
||||
removed++;
|
||||
Log.info('EVICT_CHANNEL_ARCHIVAL', item.channel);
|
||||
archived++;
|
||||
cb();
|
||||
}));
|
||||
};
|
||||
|
||||
var done = function () {
|
||||
if (config.retainData) {
|
||||
return void Log.info('EVICT_CHANNELS_ARCHIVED', archived);
|
||||
}
|
||||
return void Log.info('EVICT_CHANNELS_REMOVED', removed);
|
||||
return void Log.info('EVICT_CHANNELS_ARCHIVED', archived);
|
||||
};
|
||||
|
||||
store.listChannels(handler, w(done));
|
||||
|
|
|
@ -0,0 +1,220 @@
|
|||
/* three types of actions:
|
||||
* read
|
||||
* write
|
||||
* append
|
||||
each of which takes a random amount of time
|
||||
|
||||
*/
|
||||
var Util = require("../../lib/common-util");
|
||||
var schedule = require("../../lib/schedule")();
|
||||
var nThen = require("nthen");
|
||||
|
||||
var rand = function (n) {
|
||||
return Math.floor(Math.random() * n);
|
||||
};
|
||||
|
||||
var rand_time = function () {
|
||||
// between 25 and 324
|
||||
return rand(300) + 25;
|
||||
};
|
||||
|
||||
var makeAction = function (type) {
|
||||
var i = 0;
|
||||
return function (time) {
|
||||
var j = i++;
|
||||
return function (next) {
|
||||
console.log(" Beginning action: %s#%s", type, j);
|
||||
setTimeout(function () {
|
||||
console.log(" Completed action: %s#%s", type, j);
|
||||
next();
|
||||
}, time);
|
||||
return j;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
var TYPES = ['WRITE', 'READ', 'APPEND'];
|
||||
var chooseAction = function () {
|
||||
var n = rand(100);
|
||||
|
||||
if (n < 50) { return 'APPEND'; }
|
||||
if (n < 90) { return 'READ'; }
|
||||
return 'WRITE';
|
||||
|
||||
//return TYPES[rand(3)];
|
||||
};
|
||||
|
||||
var test = function (script, cb) {
|
||||
var uid = Util.uid();
|
||||
|
||||
var TO_RUN = script.length;
|
||||
var total_run = 0;
|
||||
|
||||
var parallel = 0;
|
||||
var last_run_ordered = -1;
|
||||
//var i = 0;
|
||||
|
||||
var ACTIONS = {};
|
||||
TYPES.forEach(function (type) {
|
||||
ACTIONS[type] = makeAction(type);
|
||||
});
|
||||
|
||||
nThen(function (w) {
|
||||
setTimeout(w(), 3000);
|
||||
// run scripted actions with assertions
|
||||
script.forEach(function (scene) {
|
||||
var type = scene[0];
|
||||
var time = typeof(scene[1]) === 'number'? scene[1]: rand_time();
|
||||
|
||||
var action = ACTIONS[type](time);
|
||||
console.log("Queuing action of type: %s(%s)", type, time);
|
||||
|
||||
var proceed = w();
|
||||
|
||||
switch (type) {
|
||||
case 'APPEND':
|
||||
return schedule.ordered(uid, w(function (next) {
|
||||
parallel++;
|
||||
var temp = action(function () {
|
||||
parallel--;
|
||||
total_run++;
|
||||
proceed();
|
||||
next();
|
||||
});
|
||||
if (temp !== (last_run_ordered + 1)) {
|
||||
throw new Error("out of order");
|
||||
}
|
||||
last_run_ordered = temp;
|
||||
}));
|
||||
case 'WRITE':
|
||||
return schedule.blocking(uid, w(function (next) {
|
||||
parallel++;
|
||||
action(function () {
|
||||
parallel--;
|
||||
total_run++;
|
||||
proceed();
|
||||
next();
|
||||
});
|
||||
if (parallel > 1) {
|
||||
console.log("parallelism === %s", parallel);
|
||||
throw new Error("too much parallel");
|
||||
}
|
||||
}));
|
||||
case 'READ':
|
||||
return schedule.unordered(uid, w(function (next) {
|
||||
parallel++;
|
||||
action(function () {
|
||||
parallel--;
|
||||
total_run++;
|
||||
proceed();
|
||||
next();
|
||||
});
|
||||
}));
|
||||
default:
|
||||
throw new Error("wut");
|
||||
}
|
||||
});
|
||||
}).nThen(function () {
|
||||
// make assertions about the whole script
|
||||
if (total_run !== TO_RUN) {
|
||||
console.log("Ran %s / %s", total_run, TO_RUN);
|
||||
throw new Error("skipped tasks");
|
||||
}
|
||||
console.log("total_run === %s", total_run);
|
||||
|
||||
cb();
|
||||
});
|
||||
};
|
||||
|
||||
|
||||
var randomScript = function () {
|
||||
var len = rand(15) + 10;
|
||||
var script = [];
|
||||
while (len--) {
|
||||
script.push([
|
||||
chooseAction(),
|
||||
rand_time(),
|
||||
]);
|
||||
}
|
||||
return script;
|
||||
};
|
||||
|
||||
var WRITE = function (t) {
|
||||
return ['WRITE', t];
|
||||
};
|
||||
var READ = function (t) {
|
||||
return ['READ', t];
|
||||
};
|
||||
|
||||
var APPEND = function (t) {
|
||||
return ['APPEND', t];
|
||||
};
|
||||
|
||||
nThen(function (w) {
|
||||
test([
|
||||
['READ', 150],
|
||||
['APPEND', 200],
|
||||
['APPEND', 100],
|
||||
['READ', 350],
|
||||
['WRITE', 400],
|
||||
['APPEND', 275],
|
||||
['APPEND', 187],
|
||||
['WRITE', 330],
|
||||
['WRITE', 264],
|
||||
['WRITE', 256],
|
||||
], w(function () {
|
||||
console.log("finished pre-scripted test\n");
|
||||
}));
|
||||
}).nThen(function (w) {
|
||||
test([
|
||||
WRITE(289),
|
||||
APPEND(281),
|
||||
READ(207),
|
||||
WRITE(225),
|
||||
READ(279),
|
||||
WRITE(300),
|
||||
READ(331),
|
||||
APPEND(341),
|
||||
APPEND(385),
|
||||
READ(313),
|
||||
WRITE(285),
|
||||
READ(304),
|
||||
APPEND(273),
|
||||
APPEND(150),
|
||||
WRITE(246),
|
||||
READ(244),
|
||||
WRITE(172),
|
||||
APPEND(253),
|
||||
READ(215),
|
||||
READ(296),
|
||||
APPEND(281),
|
||||
APPEND(296),
|
||||
WRITE(168),
|
||||
], w(function () {
|
||||
console.log("finished 2nd pre-scripted test\n");
|
||||
}));
|
||||
}).nThen(function () {
|
||||
var totalTests = 50;
|
||||
var randomTests = 1;
|
||||
|
||||
var last = nThen(function () {
|
||||
console.log("beginning randomized tests");
|
||||
});
|
||||
|
||||
var queueRandomTest = function (i) {
|
||||
last = last.nThen(function (w) {
|
||||
console.log("running random test script #%s\n", i);
|
||||
test(randomScript(), w(function () {
|
||||
console.log("finished random test #%s\n", i);
|
||||
}));
|
||||
});
|
||||
};
|
||||
|
||||
while (randomTests <= totalTests) { queueRandomTest(randomTests++); }
|
||||
|
||||
last.nThen(function () {
|
||||
console.log("finished %s random tests", totalTests);
|
||||
});
|
||||
});
|
||||
|
||||
|
server.js
|
@ -4,17 +4,12 @@
|
|||
var Express = require('express');
|
||||
var Http = require('http');
|
||||
var Fs = require('fs');
|
||||
var WebSocketServer = require('ws').Server;
|
||||
var NetfluxSrv = require('chainpad-server/NetfluxWebsocketSrv');
|
||||
var Package = require('./package.json');
|
||||
var Path = require("path");
|
||||
var nThen = require("nthen");
|
||||
|
||||
var config = require("./lib/load-config");
|
||||
|
||||
// support multiple storage back ends
|
||||
var Storage = require('./storage/file');
|
||||
|
||||
var app = Express();
|
||||
|
||||
// mode can be FRESH (default), DEV, or PACKAGE
|
||||
|
@ -69,11 +64,9 @@ var setHeaders = (function () {
|
|||
if (Object.keys(headers).length) {
|
||||
return function (req, res) {
|
||||
const h = [
|
||||
/^\/pad(2)?\/inner\.html.*/,
|
||||
/^\/pad\/inner\.html.*/,
|
||||
/^\/common\/onlyoffice\/.*\/index\.html.*/,
|
||||
/^\/sheet\/inner\.html.*/,
|
||||
/^\/ooslide\/inner\.html.*/,
|
||||
/^\/oodoc\/inner\.html.*/,
|
||||
/^\/(sheet|ooslide|oodoc)\/inner\.html.*/,
|
||||
].some((regex) => {
|
||||
return regex.test(req.url)
|
||||
}) ? padHeaders : headers;
|
||||
|
@ -117,11 +110,6 @@ app.use(function (req, res, next) {
|
|||
|
||||
app.use(Express.static(__dirname + '/www'));
|
||||
|
||||
Fs.exists(__dirname + "/customize", function (e) {
|
||||
if (e) { return; }
|
||||
console.log("Cryptpad is customizable, see customize.dist/readme.md for details");
|
||||
});
|
||||
|
||||
// FIXME I think this is a regression caused by a recent PR
|
||||
// correct this hack without breaking the contributor's intended behaviour.
|
||||
|
||||
|
@ -207,81 +195,36 @@ app.use(function (req, res, next) {
|
|||
|
||||
var httpServer = Http.createServer(app);
|
||||
|
||||
httpServer.listen(config.httpPort,config.httpAddress,function(){
|
||||
var host = config.httpAddress;
|
||||
var hostName = !host.indexOf(':') ? '[' + host + ']' : host;
|
||||
|
||||
var port = config.httpPort;
|
||||
var ps = port === 80? '': ':' + port;
|
||||
|
||||
console.log('[%s] server available http://%s%s', new Date().toISOString(), hostName, ps);
|
||||
});
|
||||
if (config.httpSafePort) {
|
||||
Http.createServer(app).listen(config.httpSafePort, config.httpAddress);
|
||||
}
|
||||
|
||||
var wsConfig = { server: httpServer };
|
||||
|
||||
var rpc;
|
||||
var historyKeeper;
|
||||
|
||||
var log;
|
||||
|
||||
// Initialize logging, then the store, then tasks, then rpc, then the history keeper, and then start the server
|
||||
var nt = nThen(function (w) {
|
||||
// set up logger
|
||||
var Logger = require("./lib/log");
|
||||
//console.log("Loading logging module");
|
||||
Logger.create(config, w(function (_log) {
|
||||
log = config.log = _log;
|
||||
nThen(function (w) {
|
||||
Fs.exists(__dirname + "/customize", w(function (e) {
|
||||
if (e) { return; }
|
||||
console.log("Cryptpad is customizable, see customize.dist/readme.md for details");
|
||||
}));
|
||||
}).nThen(function (w) {
|
||||
if (config.externalWebsocketURL) {
|
||||
// if you plan to use an external websocket server
|
||||
// then you don't need to load any API services other than the logger.
|
||||
// Just abort.
|
||||
w.abort();
|
||||
return;
|
||||
httpServer.listen(config.httpPort,config.httpAddress,function(){
|
||||
var host = config.httpAddress;
|
||||
var hostName = !host.indexOf(':') ? '[' + host + ']' : host;
|
||||
|
||||
var port = config.httpPort;
|
||||
var ps = port === 80? '': ':' + port;
|
||||
|
||||
console.log('[%s] server available http://%s%s', new Date().toISOString(), hostName, ps);
|
||||
});
|
||||
|
||||
if (config.httpSafePort) {
|
||||
Http.createServer(app).listen(config.httpSafePort, config.httpAddress, w());
|
||||
}
|
||||
Storage.create(config, w(function (_store) {
|
||||
config.store = _store;
|
||||
}));
|
||||
}).nThen(function (w) {
|
||||
var Tasks = require("./storage/tasks");
|
||||
Tasks.create(config, w(function (e, tasks) {
|
||||
if (e) {
|
||||
throw e;
|
||||
}
|
||||
config.tasks = tasks;
|
||||
if (config.disableIntegratedTasks) { return; }
|
||||
setInterval(function () {
|
||||
tasks.runAll(function (err) {
|
||||
if (err) {
|
||||
// either TASK_CONCURRENCY or an error with tasks.list
|
||||
// in either case it is already logged.
|
||||
}
|
||||
});
|
||||
}, 1000 * 60 * 5); // run every five minutes
|
||||
}));
|
||||
}).nThen(function (w) {
|
||||
require("./rpc").create(config, w(function (e, _rpc) {
|
||||
if (e) {
|
||||
w.abort();
|
||||
throw e;
|
||||
}
|
||||
rpc = _rpc;
|
||||
}));
|
||||
}).nThen(function () {
|
||||
var HK = require('./historyKeeper.js');
|
||||
var hkConfig = {
|
||||
tasks: config.tasks,
|
||||
rpc: rpc,
|
||||
store: config.store,
|
||||
log: log,
|
||||
retainData: Boolean(config.retainData),
|
||||
};
|
||||
historyKeeper = HK.create(hkConfig);
|
||||
}).nThen(function () {
|
||||
var wsSrv = new WebSocketServer(wsConfig);
|
||||
NetfluxSrv.run(wsSrv, config, historyKeeper);
|
||||
var wsConfig = { server: httpServer };
|
||||
|
||||
// Initialize logging then start the API server
|
||||
require("./lib/log").create(config, function (_log) {
|
||||
config.log = _log;
|
||||
config.httpServer = httpServer;
|
||||
|
||||
if (config.externalWebsocketURL) { return; }
|
||||
require("./lib/api").create(config);
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
|
|
storage/file.js
|
@ -7,6 +7,10 @@ var Path = require("path");
|
|||
var nThen = require("nthen");
|
||||
var Semaphore = require("saferphore");
|
||||
var Util = require("../lib/common-util");
|
||||
var Meta = require("../lib/metadata");
|
||||
var Extras = require("../lib/hk-util");
|
||||
|
||||
const Schedule = require("../lib/schedule");
|
||||
const Readline = require("readline");
|
||||
const ToPull = require('stream-to-pull-stream');
|
||||
const Pull = require('pull-stream');
|
||||
|
@ -37,6 +41,10 @@ var mkArchiveMetadataPath = function (env, channelId) {
|
|||
return Path.join(env.archiveRoot, 'datastore', channelId.slice(0, 2), channelId) + '.metadata.ndjson';
|
||||
};
|
||||
|
||||
var mkTempPath = function (env, channelId) {
|
||||
return mkPath(env, channelId) + '.temp';
|
||||
};
|
||||
|
||||
// pass in the path so we can reuse the same function for archived files
|
||||
var channelExists = function (filepath, cb) {
|
||||
Fs.stat(filepath, function (err, stat) {
|
||||
|
@ -553,9 +561,6 @@ var listChannels = function (root, handler, cb) {
|
|||
// to an equivalent location in the cold storage directory
|
||||
var archiveChannel = function (env, channelName, cb) {
|
||||
// TODO close channels before archiving them?
|
||||
if (!env.retainData) {
|
||||
return void cb("ARCHIVES_DISABLED");
|
||||
}
|
||||
|
||||
// ctime is the most reliable indicator of when a file was archived
|
||||
// because it is used to indicate changes to the file's metadata
|
||||
|
@ -752,6 +757,8 @@ var getChannel = function (
|
|||
}
|
||||
|
||||
if (env.openFiles >= env.openFileLimit) {
|
||||
// FIXME warn if this is the case?
|
||||
// alternatively use graceful-fs to handle lots of concurrent reads
|
||||
// if you're running out of open files, asynchronously clean up expired files
|
||||
// do it on a shorter timeframe, though (half of normal)
|
||||
setTimeout(function () {
|
||||
|
@ -867,40 +874,187 @@ var getMessages = function (env, chanName, handler, cb) {
|
|||
});
|
||||
};
|
||||
|
||||
/*::
|
||||
export type ChainPadServer_MessageObj_t = { buff: Buffer, offset: number };
|
||||
export type ChainPadServer_Storage_t = {
|
||||
readMessagesBin: (
|
||||
channelName:string,
|
||||
start:number,
|
||||
asyncMsgHandler:(msg:ChainPadServer_MessageObj_t, moreCb:()=>void, abortCb:()=>void)=>void,
|
||||
cb:(err:?Error)=>void
|
||||
)=>void,
|
||||
message: (channelName:string, content:string, cb:(err:?Error)=>void)=>void,
|
||||
messageBin: (channelName:string, content:Buffer, cb:(err:?Error)=>void)=>void,
|
||||
getMessages: (channelName:string, msgHandler:(msg:string)=>void, cb:(err:?Error)=>void)=>void,
|
||||
removeChannel: (channelName:string, cb:(err:?Error)=>void)=>void,
|
||||
closeChannel: (channelName:string, cb:(err:?Error)=>void)=>void,
|
||||
flushUnusedChannels: (cb:()=>void)=>void,
|
||||
getChannelSize: (channelName:string, cb:(err:?Error, size:?number)=>void)=>void,
|
||||
getChannelMetadata: (channelName:string, cb:(err:?Error|string, data:?any)=>void)=>void,
|
||||
clearChannel: (channelName:string, (err:?Error)=>void)=>void
|
||||
var trimChannel = function (env, channelName, hash, _cb) {
|
||||
var cb = Util.once(Util.mkAsync(_cb));
|
||||
// this function is queued as a blocking action for the relevant channel
|
||||
|
||||
// derive temporary file paths for metadata and log buffers
|
||||
var tempChannelPath = mkTempPath(env, channelName);
|
||||
|
||||
// derive production db paths
|
||||
var channelPath = mkPath(env, channelName);
|
||||
var metadataPath = mkMetadataPath(env, channelName);
|
||||
|
||||
// derive archive paths
|
||||
var archiveChannelPath = mkArchivePath(env, channelName);
|
||||
var archiveMetadataPath = mkArchiveMetadataPath(env, channelName);
|
||||
|
||||
var metadataReference = {};
|
||||
|
||||
var tempStream;
|
||||
var ABORT;
|
||||
|
||||
var cleanUp = function (cb) {
|
||||
if (tempStream && !tempStream.closed) {
|
||||
try {
|
||||
tempStream.close();
|
||||
} catch (err) { }
|
||||
}
|
||||
|
||||
Fse.unlink(tempChannelPath, function (err) {
|
||||
// proceed if deleted or if there was nothing to delete
|
||||
if (!err || err.code === 'ENOENT') { return cb(); }
|
||||
// else abort and call back with the error
|
||||
cb(err);
|
||||
});
|
||||
};
|
||||
|
||||
nThen(function (w) {
|
||||
// close the file descriptor if it is open
|
||||
closeChannel(env, channelName, w(function (err) {
|
||||
if (err) {
|
||||
w.abort();
|
||||
return void cb(err);
|
||||
}
|
||||
}));
|
||||
}).nThen(function (w) {
|
||||
cleanUp(w(function (err) {
|
||||
if (err) {
|
||||
w.abort();
|
||||
cb(err);
|
||||
}
|
||||
}));
|
||||
}).nThen(function (w) {
|
||||
// eat errors since loading the logger here would create a cyclical dependency
|
||||
var lineHandler = Meta.createLineHandler(metadataReference, Util.noop);
|
||||
|
||||
readMetadata(env, channelName, lineHandler, w(function (err) {
|
||||
if (err) {
|
||||
w.abort();
|
||||
return void cb(err);
|
||||
}
|
||||
// if there were no errors just fall through to the next block
|
||||
}));
|
||||
}).nThen(function (w) {
|
||||
// create temp buffer writeStream
|
||||
tempStream = Fs.createWriteStream(tempChannelPath, {
|
||||
flags: 'a',
|
||||
});
|
||||
tempStream.on('open', w());
|
||||
tempStream.on('error', function (err) {
|
||||
w.abort();
|
||||
ABORT = true;
|
||||
cleanUp(function () {
|
||||
cb(err);
|
||||
});
|
||||
});
|
||||
}).nThen(function (w) {
|
||||
var i = 0;
|
||||
var retain = false;
|
||||
|
||||
var handler = function (msgObj, readMore, abort) {
|
||||
if (ABORT) { return void abort(); }
|
||||
// the first message might be metadata... ignore it if so
|
||||
if (i++ === 0 && msgObj.buff.indexOf('{') === 0) {
|
||||
return readMore();
|
||||
}
|
||||
|
||||
if (retain) {
|
||||
// if this flag is set then you've already found
|
||||
// the message you were looking for.
|
||||
// write it to your temp buffer and keep going
|
||||
return void tempStream.write(msgObj.buff, function () {
|
||||
readMore();
|
||||
});
|
||||
}
|
||||
|
||||
var msg = Util.tryParse(msgObj.buff.toString('utf8'));
|
||||
|
||||
var msgHash = Extras.getHash(msg[4]);
|
||||
|
||||
if (msgHash === hash) {
|
||||
// everything from this point on should be retained
|
||||
retain = true;
|
||||
return void tempStream.write(msgObj.buff, function () {
|
||||
readMore();
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
readMessagesBin(env, channelName, 0, handler, w(function (err) {
|
||||
if (err) {
|
||||
w.abort();
|
||||
return void cleanUp(function () {
|
||||
// intentionally call back with main error
|
||||
// not the cleanup error
|
||||
cb(err);
|
||||
});
|
||||
}
|
||||
|
||||
if (!retain) {
|
||||
// you never found the message you were looking for
|
||||
// this whole operation is invalid...
|
||||
// clean up, abort, and call back with an error
|
||||
|
||||
w.abort();
|
||||
cleanUp(function () {
|
||||
// intentionally call back with main error
|
||||
// not the cleanup error
|
||||
cb('HASH_NOT_FOUND');
|
||||
});
|
||||
}
|
||||
}));
|
||||
}).nThen(function (w) {
|
||||
// copy existing channel to the archive
|
||||
Fse.copy(channelPath, archiveChannelPath, w(function (err) {
|
||||
if (!err || err.code === 'ENOENT') { return; }
|
||||
w.abort();
|
||||
cleanUp(function () {
|
||||
cb(err);
|
||||
});
|
||||
}));
|
||||
|
||||
// copy existing metadata to the archive
|
||||
Fse.copy(metadataPath, archiveMetadataPath, w(function (err) {
|
||||
if (!err || err.code === 'ENOENT') { return; }
|
||||
w.abort();
|
||||
cleanUp(function () {
|
||||
cb(err);
|
||||
});
|
||||
}));
|
||||
}).nThen(function (w) {
|
||||
// overwrite the existing metadata log with the current metadata state
|
||||
Fs.writeFile(metadataPath, JSON.stringify(metadataReference.meta) + '\n', w(function (err) {
|
||||
// this shouldn't happen, but if it does your channel might be messed up :(
|
||||
if (err) {
|
||||
w.abort();
|
||||
cb(err);
|
||||
}
|
||||
}));
|
||||
|
||||
// overwrite the existing channel with the temp log
|
||||
Fse.move(tempChannelPath, channelPath, {
|
||||
overwrite: true,
|
||||
}, w(function (err) {
|
||||
// this shouldn't happen, but if it does your channel might be messed up :(
|
||||
if (err) {
|
||||
w.abort();
|
||||
cb(err);
|
||||
}
|
||||
}));
|
||||
}).nThen(function () {
|
||||
// clean up and call back with no error
|
||||
// triggering a historyKeeper index cache eviction...
|
||||
cleanUp(function () {
|
||||
cb();
|
||||
});
|
||||
});
|
||||
};
|
||||
export type ChainPadServer_Config_t = {
|
||||
verbose?: boolean,
|
||||
filePath?: string,
|
||||
channelExpirationMs?: number,
|
||||
openFileLimit?: number
|
||||
};
|
||||
*/
|
||||
module.exports.create = function (
|
||||
conf /*:ChainPadServer_Config_t*/,
|
||||
cb /*:(store:ChainPadServer_Storage_t)=>void*/
|
||||
) {
|
||||
|
||||
module.exports.create = function (conf, cb) {
|
||||
var env = {
|
||||
root: conf.filePath || './datastore',
|
||||
archiveRoot: conf.archivePath || './data/archive',
|
||||
retainData: conf.retainData,
|
||||
channels: { },
|
||||
channelExpirationMs: conf.channelExpirationMs || 30000,
|
||||
verbose: conf.verbose,
|
||||
|
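For orientation, a minimal sketch of how this factory is called; server.js (earlier in this diff) simply passes its whole config object, and the paths below mirror the defaults visible just above, so treat them as assumptions rather than requirements:

var Storage = require('./storage/file');
Storage.create({
    filePath: './datastore',        // default shown above
    archivePath: './data/archive',  // default shown above
}, function (store) {
    // every method on `store` is now queued per channel by the scheduler described below
});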
@ -909,6 +1063,24 @@ module.exports.create = function (
};
var it;

/* our scheduler prioritizes and executes tasks with respect
to all other tasks invoked with an identical key
(typically the id of the concerned channel)

it assumes that all tasks can be categorized into three types

1. unordered tasks such as streaming reads which can take
a long time to complete.

2. ordered tasks such as appending to a file which does not
take very long, but where priority is important.

3. blocking tasks such as rewriting a file where it would be
dangerous to perform any other task concurrently.

*/
var schedule = env.schedule = Schedule();
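A rough usage sketch of the three task types, based on how they are invoked throughout this file; `streamRead`, `appendLine` and `rewriteFile` are hypothetical placeholders for the real storage helpers:

var schedule = Schedule();
var id = 'someChannelId';
schedule.unordered(id, function (next) {
    // long-running reads; several may be in flight for the same channel
    streamRead(id, next);
});
schedule.ordered(id, function (next) {
    // appends are quick but must run one after another, in submission order
    appendLine(id, next);
});
schedule.blocking(id, function (next) {
    // nothing else for this channel runs until next() is called
    rewriteFile(id, next);
});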
|
||||
|
||||
nThen(function (w) {
|
||||
// make sure the store's directory exists
|
||||
Fse.mkdirp(env.root, PERMISSIVE, w(function (err) {
|
||||
|
@ -928,43 +1100,80 @@ module.exports.create = function (
// write a new message to a log
message: function (channelName, content, cb) {
if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
message(env, channelName, content, cb);
schedule.ordered(channelName, function (next) {
message(env, channelName, content, Util.both(cb, next));
});
},
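`Util.both` is not shown in this diff; judging from how it is used here, it is presumably a small helper that returns a function invoking both callbacks with the same arguments, so the storage routine simultaneously answers the caller (`cb`) and releases the scheduler (`next`). A sketch of that assumption:

// assumed shape of Util.both, for illustration only
Util.both = function (a, b) {
    return function () {
        a.apply(null, arguments); // answer the caller
        b.apply(null, arguments); // let the scheduler start the next task
    };
};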
|
||||
// iterate over all the messages in a log
|
||||
getMessages: function (channelName, msgHandler, cb) {
|
||||
if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
|
||||
getMessages(env, channelName, msgHandler, cb);
|
||||
schedule.unordered(channelName, function (next) {
|
||||
getMessages(env, channelName, msgHandler, Util.both(cb, next));
|
||||
});
|
||||
},
|
||||
|
||||
// NEWER IMPLEMENTATIONS OF THE SAME THING
|
||||
// write a new message to a log
|
||||
messageBin: (channelName, content, cb) => {
|
||||
if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
|
||||
messageBin(env, channelName, content, cb);
|
||||
schedule.ordered(channelName, function (next) {
|
||||
messageBin(env, channelName, content, Util.both(cb, next));
|
||||
});
|
||||
},
|
||||
// iterate over the messages in a log
|
||||
readMessagesBin: (channelName, start, asyncMsgHandler, cb) => {
if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
readMessagesBin(env, channelName, start, asyncMsgHandler, cb);
// XXX there is a race condition here
// historyKeeper reads the file to find the byte offset of the first interesting message
// then calls this function again to read from that point.
// If this task is in the queue already when the file is read again
// then that byte offset will have been invalidated
// and the resulting stream probably won't align with message boundaries.
// We can evict the cache in the callback but by that point it will be too late.
// Presumably we'll need to bury some of historyKeeper's logic into a filestore method
// in order to make index/read sequences atomic.
// Otherwise, we can add a new task type to the scheduler to take invalidation into account...
// either method introduces significant complexity.
schedule.unordered(channelName, function (next) {
readMessagesBin(env, channelName, start, asyncMsgHandler, Util.both(cb, next));
});
},
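The XXX note above mentions burying the index/read sequence inside a single filestore method; one possible shape for that idea, with `computeOffset` standing in as a hypothetical helper for historyKeeper's offset lookup, might be:

// hypothetical sketch only: offset lookup and streaming share one scheduled task,
// so nothing queued in between can invalidate the offset
readMessagesSince: (channelName, lastKnownHash, asyncMsgHandler, cb) => {
    if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
    schedule.unordered(channelName, function (next) {
        computeOffset(env, channelName, lastKnownHash, function (err, start) { // hypothetical helper
            if (err) { return void Util.both(cb, next)(err); }
            readMessagesBin(env, channelName, start, asyncMsgHandler, Util.both(cb, next));
        });
    });
},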
|
||||
|
||||
// METHODS for deleting data
|
||||
// remove a channel and its associated metadata log if present
|
||||
removeChannel: function (channelName, cb) {
|
||||
if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
|
||||
removeChannel(env, channelName, function (err) {
|
||||
cb(err);
|
||||
// XXX there's another race condition here...
|
||||
// when a remove and an append are scheduled in that order
|
||||
// the remove will delete the channel's metadata (including its validateKey)
|
||||
// then the append will recreate the channel and insert a message.
|
||||
// clients that are connected to the channel via historyKeeper should be kicked out
|
||||
// however, anyone that connects to that channel in the future will be able to read the
|
||||
// signed message, but will not find its validate key...
|
||||
// resulting in a junk/unusable document
|
||||
schedule.ordered(channelName, function (next) {
|
||||
removeChannel(env, channelName, Util.both(cb, next));
|
||||
});
|
||||
},
|
||||
// remove a channel and its associated metadata log from the archive directory
|
||||
removeArchivedChannel: function (channelName, cb) {
|
||||
if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
|
||||
removeArchivedChannel(env, channelName, cb);
|
||||
schedule.ordered(channelName, function (next) {
|
||||
removeArchivedChannel(env, channelName, Util.both(cb, next));
|
||||
});
|
||||
},
|
||||
// clear all data for a channel but preserve its metadata
|
||||
clearChannel: function (channelName, cb) {
|
||||
if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
|
||||
clearChannel(env, channelName, cb);
|
||||
schedule.ordered(channelName, function (next) {
|
||||
clearChannel(env, channelName, Util.both(cb, next));
|
||||
});
|
||||
},
|
||||
trimChannel: function (channelName, hash, cb) {
if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
schedule.blocking(channelName, function (next) {
trimChannel(env, channelName, hash, Util.both(cb, next));
});
},
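A minimal sketch of a caller for the new method; the channel id and hash are placeholders, and the error string matches the one produced by trimChannel above:

store.trimChannel(channelId, lastKnownHash, function (err) {
    if (err === 'HASH_NOT_FOUND') { return; } // the requested point was never found; nothing changed
    if (err) { return; }                      // some other failure; the temp file has been cleaned up
    // success: the log now starts at the message with lastKnownHash,
    // and the previous log + metadata were copied to the archive first
});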
|
||||
|
||||
// check if a channel exists in the database
|
||||
|
@ -972,47 +1181,85 @@ module.exports.create = function (
|
|||
if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
|
||||
// construct the path
|
||||
var filepath = mkPath(env, channelName);
|
||||
channelExists(filepath, cb);
|
||||
// (ansuz) I'm uncertain whether this task should be unordered or ordered.
|
||||
// there's a round trip to the client (and possibly the user) before they decide
|
||||
// to act on the information of whether there is already content present in this channel.
|
||||
// so it's practically impossible to avoid race conditions where someone else creates
|
||||
// some content before you.
|
||||
// if that's the case, it's basically impossible that you'd generate the same signing key,
|
||||
// and thus historykeeper should reject the signed messages of whoever loses the race.
|
||||
// thus 'unordered' seems appropriate.
|
||||
schedule.unordered(channelName, function (next) {
|
||||
channelExists(filepath, Util.both(cb, next));
|
||||
});
|
||||
},
|
||||
// check if a channel exists in the archive
|
||||
isChannelArchived: function (channelName, cb) {
|
||||
if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
|
||||
// construct the path
|
||||
var filepath = mkArchivePath(env, channelName);
|
||||
channelExists(filepath, cb);
|
||||
// as with the method above, somebody might remove, restore, or overwrite an archive
|
||||
// in the time that it takes to answer this query and to execute whatever follows.
|
||||
// since it's impossible to win the race every time let's just make this 'unordered'
|
||||
schedule.unordered(channelName, function (next) {
|
||||
channelExists(filepath, Util.both(cb, next));
|
||||
});
|
||||
},
|
||||
// move a channel from the database to the archive, along with its metadata
|
||||
archiveChannel: function (channelName, cb) {
|
||||
if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
|
||||
archiveChannel(env, channelName, cb);
|
||||
// again, the semantics around archiving and appending are really muddy.
|
||||
// so I'm calling this 'unordered' again
|
||||
schedule.unordered(channelName, function (next) {
|
||||
archiveChannel(env, channelName, Util.both(cb, next));
|
||||
});
|
||||
},
|
||||
// restore a channel from the archive to the database, along with its metadata
|
||||
restoreArchivedChannel: function (channelName, cb) {
|
||||
if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
|
||||
unarchiveChannel(env, channelName, cb);
|
||||
// archive restoration will fail if either a file or its metadata exists in the live db.
|
||||
// so I'm calling this 'ordered' to give writes a chance to flush out.
|
||||
// accidental conflicts are extremely unlikely since clients check the status
|
||||
// of a previously known channel before joining.
|
||||
schedule.ordered(channelName, function (next) {
|
||||
unarchiveChannel(env, channelName, Util.both(cb, next));
|
||||
});
|
||||
},
|
||||
|
||||
// METADATA METHODS
|
||||
// fetch the metadata for a channel
|
||||
getChannelMetadata: function (channelName, cb) {
|
||||
if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
|
||||
getChannelMetadata(env, channelName, cb);
|
||||
// The only things that can invalidate this method's results are channel archival, removal, or trimming.
|
||||
// We want it to be fast, so let's make it unordered.
|
||||
schedule.unordered(channelName, function (next) {
|
||||
getChannelMetadata(env, channelName, Util.both(cb, next));
|
||||
});
|
||||
},
|
||||
// iterate over lines of metadata changes from a dedicated log
|
||||
readDedicatedMetadata: function (channelName, handler, cb) {
|
||||
if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
|
||||
getDedicatedMetadata(env, channelName, handler, cb);
|
||||
// Everything that modifies metadata also updates clients, so this can be 'unordered'
|
||||
schedule.unordered(channelName, function (next) {
|
||||
getDedicatedMetadata(env, channelName, handler, Util.both(cb, next));
|
||||
});
|
||||
},
|
||||
|
||||
// iterate over multiple lines of metadata changes
|
||||
readChannelMetadata: function (channelName, handler, cb) {
|
||||
if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
|
||||
readMetadata(env, channelName, handler, cb);
|
||||
// same logic as 'readDedicatedMetadata'
|
||||
schedule.unordered(channelName, function (next) {
|
||||
readMetadata(env, channelName, handler, Util.both(cb, next));
|
||||
});
|
||||
},
|
||||
// write a new line to a metadata log
|
||||
writeMetadata: function (channelName, data, cb) {
|
||||
if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
|
||||
writeMetadata(env, channelName, data, cb);
|
||||
// metadata writes are fast and should be applied in order
|
||||
schedule.ordered(channelName, function (next) {
|
||||
writeMetadata(env, channelName, data, Util.both(cb, next));
|
||||
});
|
||||
},
|
||||
|
||||
// CHANNEL ITERATION
|
||||
|
@ -1025,13 +1272,22 @@ module.exports.create = function (
|
|||
|
||||
getChannelSize: function (channelName, cb) {
|
||||
if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
|
||||
channelBytes(env, channelName, cb);
|
||||
// this method should be really fast and it probably doesn't matter much
|
||||
// if we get the size slightly before or after somebody writes a few hundred bytes to it.
|
||||
schedule.ordered(channelName, function (next) {
|
||||
channelBytes(env, channelName, Util.both(cb, next));
|
||||
});
|
||||
},
|
||||
// OTHER DATABASE FUNCTIONALITY
|
||||
// remove a particular channel from the cache
|
||||
closeChannel: function (channelName, cb) {
|
||||
if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
|
||||
closeChannel(env, channelName, cb);
|
||||
// It is most likely the case that the channel is inactive if we are trying to close it,
|
||||
// thus it doesn't make much difference whether it's ordered or not.
|
||||
// In any case, it will be re-opened if anyone tries to write to it.
|
||||
schedule.ordered(channelName, function (next) {
|
||||
closeChannel(env, channelName, Util.both(cb, next));
|
||||
});
|
||||
},
|
||||
// iterate over open channels and close any that are not active
|
||||
flushUnusedChannels: function (cb) {
|
||||
|
@ -1039,7 +1295,10 @@ module.exports.create = function (
|
|||
},
|
||||
// write to a log file
|
||||
log: function (channelName, content, cb) {
|
||||
message(env, channelName, content, cb);
|
||||
// you probably want the events in your log to be in the correct order.
|
||||
schedule.ordered(channelName, function (next) {
|
||||
message(env, channelName, content, Util.both(cb, next));
|
||||
});
|
||||
},
|
||||
// shut down the database
|
||||
shutdown: function () {
|
||||
|
|
|
@ -202,22 +202,6 @@ var expire = function (env, task, cb) {
|
|||
var Log = env.log;
|
||||
var args = task.slice(2);
|
||||
|
||||
if (!env.retainData) {
|
||||
Log.info('DELETION_SCHEDULED_EXPIRATION', {
|
||||
task: task,
|
||||
});
|
||||
env.store.removeChannel(args[0], function (err) {
|
||||
if (err) {
|
||||
Log.error('DELETION_SCHEDULED_EXPIRATION_ERROR', {
|
||||
task: task,
|
||||
error: err,
|
||||
});
|
||||
}
|
||||
cb();
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
Log.info('ARCHIVAL_SCHEDULED_EXPIRATION', {
|
||||
task: task,
|
||||
});
|
||||
|
@ -381,7 +365,6 @@ Tasks.create = function (config, cb) {
|
|||
root: config.taskPath || './tasks',
|
||||
log: config.log,
|
||||
store: config.store,
|
||||
retainData: Boolean(config.retainData),
|
||||
};
|
||||
|
||||
// make sure the path exists...
|
||||
|
|
|
@ -491,6 +491,11 @@ define([
|
|||
$ok.focus();
|
||||
Notifier.notify();
|
||||
});
|
||||
|
||||
return {
|
||||
element: frame,
|
||||
delete: close
|
||||
};
|
||||
};
|
||||
|
||||
UI.prompt = function (msg, def, cb, opt, force) {
|
||||
|
@ -1103,39 +1108,36 @@ define([
|
|||
return radio;
|
||||
};
|
||||
|
||||
var corner = {
|
||||
queue: [],
|
||||
state: false
|
||||
};
|
||||
UI.cornerPopup = function (text, actions, footer, opts) {
|
||||
opts = opts || {};
|
||||
|
||||
var minimize = h('div.cp-corner-minimize.fa.fa-window-minimize');
|
||||
var maximize = h('div.cp-corner-maximize.fa.fa-window-maximize');
|
||||
var dontShowAgain = h('div.cp-corner-dontshow', [
|
||||
h('span.fa.fa-times'),
|
||||
Messages.dontShowAgain || "Don't show again" // XXX
|
||||
]);
|
||||
|
||||
var popup = h('div.cp-corner-container', [
|
||||
minimize,
|
||||
maximize,
|
||||
h('div.cp-corner-filler', { style: "width:110px;" }),
|
||||
h('div.cp-corner-filler', { style: "width:80px;" }),
|
||||
h('div.cp-corner-filler', { style: "width:60px;" }),
|
||||
h('div.cp-corner-filler', { style: "width:40px;" }),
|
||||
h('div.cp-corner-filler', { style: "width:20px;" }),
|
||||
setHTML(h('div.cp-corner-text'), text),
|
||||
h('div.cp-corner-actions', actions),
|
||||
setHTML(h('div.cp-corner-footer'), footer)
|
||||
setHTML(h('div.cp-corner-footer'), footer),
|
||||
opts.dontShowAgain ? dontShowAgain : undefined
|
||||
]);
|
||||
|
||||
var $popup = $(popup);
|
||||
|
||||
$(minimize).click(function () {
|
||||
$popup.addClass('cp-minimized');
|
||||
});
|
||||
$(maximize).click(function () {
|
||||
$popup.removeClass('cp-minimized');
|
||||
});
|
||||
|
||||
if (opts.hidden) {
|
||||
$popup.addClass('cp-minimized');
|
||||
}
|
||||
if (opts.big) {
|
||||
$popup.addClass('cp-corner-big');
|
||||
}
|
||||
if (opts.alt) {
|
||||
$popup.addClass('cp-corner-alt');
|
||||
}
|
||||
|
||||
var hide = function () {
|
||||
$popup.hide();
|
||||
|
@ -1145,9 +1147,28 @@ define([
|
|||
};
|
||||
var deletePopup = function () {
|
||||
$popup.remove();
|
||||
if (!corner.queue.length) {
|
||||
corner.state = false;
|
||||
return;
|
||||
}
|
||||
setTimeout(function () {
|
||||
$('body').append(corner.queue.pop());
|
||||
}, 5000);
|
||||
};
|
||||
|
||||
$('body').append(popup);
|
||||
$(dontShowAgain).click(function () {
|
||||
deletePopup();
|
||||
if (typeof(opts.dontShowAgain) === "function") {
|
||||
opts.dontShowAgain();
|
||||
}
|
||||
});
|
||||
|
||||
if (corner.state) {
|
||||
corner.queue.push(popup);
|
||||
} else {
|
||||
corner.state = true;
|
||||
$('body').append(popup);
|
||||
}
|
||||
|
||||
return {
|
||||
popup: popup,
|
||||
|
|
|
@ -53,10 +53,18 @@ define([
|
|||
return list;
|
||||
};
|
||||
|
||||
Msg.declineFriendRequest = function (store, data, cb) {
store.mailbox.sendTo('DECLINE_FRIEND_REQUEST', {}, {
channel: data.notifications,
curvePublic: data.curvePublic
}, function (obj) {
cb(obj);
});
};
Msg.acceptFriendRequest = function (store, data, cb) {
var friend = getFriend(store.proxy, data.curvePublic) || {};
var myData = createData(store.proxy, friend.channel || data.channel);
store.mailbox.sendTo('ACCEPT_FRIEND_REQUEST', myData, {
store.mailbox.sendTo('ACCEPT_FRIEND_REQUEST', { user: myData }, {
channel: data.notifications,
curvePublic: data.curvePublic
}, function (obj) {
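A recurring theme in this changeset is wrapping the sender's details in a nested `user` object instead of spreading them across the message content. Roughly (field names taken from how the createData results are consumed elsewhere in this diff; treat the exact set as illustrative):

// old FRIEND_REQUEST-style payload: sender data at the top level of "content"
{ displayName: 'Alice', curvePublic: '...', notifications: '...', channel: '...' }
// new payload: the same data under a "user" key
{ user: { displayName: 'Alice', curvePublic: '...', notifications: '...', channel: '...' } }
// handlers stay compatible with both shapes:
var userData = msg.content.user || msg.content;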
@ -110,7 +118,7 @@ define([
|
|||
var proxy = store.proxy;
|
||||
var friend = proxy.friends[curvePublic];
|
||||
if (!friend) { return void cb({error: 'ENOENT'}); }
|
||||
if (!friend.notifications || !friend.channel) { return void cb({error: 'EINVAL'}); }
|
||||
if (!friend.notifications) { return void cb({error: 'EINVAL'}); }
|
||||
|
||||
store.mailbox.sendTo('UNFRIEND', {
|
||||
curvePublic: proxy.curvePublic
|
||||
|
|
|
@ -63,6 +63,21 @@ define([
|
|||
});
|
||||
};
|
||||
|
||||
var dcAlert;
|
||||
UIElements.disconnectAlert = function () {
|
||||
if (dcAlert && $(dcAlert.element).length) { return; }
|
||||
dcAlert = UI.alert(Messages.common_connectionLost, undefined, true);
|
||||
};
|
||||
UIElements.reconnectAlert = function () {
|
||||
if (!dcAlert) { return; }
|
||||
if (!dcAlert.delete) {
|
||||
dcAlert = undefined;
|
||||
return;
|
||||
}
|
||||
dcAlert.delete();
|
||||
dcAlert = undefined;
|
||||
};
|
||||
|
||||
var importContent = function (type, f, cfg) {
|
||||
return function () {
|
||||
var $files = $('<input>', {type:"file"});
|
||||
|
@ -219,15 +234,7 @@ define([
|
|||
common.mailbox.sendTo("RM_OWNER", {
|
||||
channel: channel,
|
||||
title: data.title,
|
||||
pending: pending,
|
||||
user: {
|
||||
displayName: user.name,
|
||||
avatar: user.avatar,
|
||||
profile: user.profile,
|
||||
notifications: user.notifications,
|
||||
curvePublic: user.curvePublic,
|
||||
edPublic: priv.edPublic
|
||||
}
|
||||
pending: pending
|
||||
}, {
|
||||
channel: friend.notifications,
|
||||
curvePublic: friend.curvePublic
|
||||
|
@ -370,15 +377,7 @@ define([
|
|||
channel: channel,
|
||||
href: data.href,
|
||||
password: data.password,
|
||||
title: data.title,
|
||||
user: {
|
||||
displayName: user.name,
|
||||
avatar: user.avatar,
|
||||
profile: user.profile,
|
||||
notifications: user.notifications,
|
||||
curvePublic: user.curvePublic,
|
||||
edPublic: priv.edPublic
|
||||
}
|
||||
title: data.title
|
||||
}, {
|
||||
channel: friend.notifications,
|
||||
curvePublic: friend.curvePublic
|
||||
|
@ -4180,52 +4179,68 @@ define([
|
|||
};
|
||||
|
||||
var crowdfundingState = false;
|
||||
UIElements.displayCrowdfunding = function (common) {
|
||||
UIElements.displayCrowdfunding = function (common, force) {
|
||||
if (crowdfundingState) { return; }
|
||||
if (AppConfig.disableCrowdfundingMessages) { return; }
|
||||
var priv = common.getMetadataMgr().getPrivateData();
|
||||
|
||||
|
||||
var todo = function () {
|
||||
crowdfundingState = true;
|
||||
// Display the popup
|
||||
var text = Messages.crowdfunding_popup_text;
|
||||
var yes = h('button.cp-corner-primary', [
|
||||
h('span.fa.fa-external-link'),
|
||||
'OpenCollective'
|
||||
]);
|
||||
var no = h('button.cp-corner-cancel', Messages.crowdfunding_popup_no);
|
||||
var actions = h('div', [no, yes]);
|
||||
|
||||
var dontShowAgain = function () {
|
||||
common.setAttribute(['general', 'crowdfunding'], false);
|
||||
Feedback.send('CROWDFUNDING_NEVER');
|
||||
};
|
||||
|
||||
var modal = UI.cornerPopup(text, actions, null, {
|
||||
big: true,
|
||||
alt: true,
|
||||
dontShowAgain: dontShowAgain
|
||||
});
|
||||
|
||||
$(yes).click(function () {
|
||||
modal.delete();
|
||||
common.openURL(priv.accounts.donateURL);
|
||||
Feedback.send('CROWDFUNDING_YES');
|
||||
});
|
||||
$(modal.popup).find('a').click(function (e) {
|
||||
e.stopPropagation();
|
||||
e.preventDefault();
|
||||
modal.delete();
|
||||
common.openURL(priv.accounts.donateURL);
|
||||
Feedback.send('CROWDFUNDING_LINK');
|
||||
});
|
||||
$(no).click(function () {
|
||||
modal.delete();
|
||||
Feedback.send('CROWDFUNDING_NO');
|
||||
});
|
||||
};
|
||||
|
||||
if (force) {
|
||||
crowdfundingState = true;
|
||||
return void todo();
|
||||
}
|
||||
|
||||
if (AppConfig.disableCrowdfundingMessages) { return; }
|
||||
if (priv.plan) { return; }
|
||||
|
||||
crowdfundingState = true;
|
||||
setTimeout(function () {
|
||||
common.getAttribute(['general', 'crowdfunding'], function (err, val) {
|
||||
if (err || val === false) { return; }
|
||||
common.getSframeChannel().query('Q_GET_PINNED_USAGE', null, function (err, obj) {
|
||||
var quotaMb = obj.quota / (1024 * 1024);
|
||||
if (quotaMb < 10) { return; }
|
||||
// Display the popup
|
||||
var text = Messages.crowdfunding_popup_text;
|
||||
var yes = h('button.cp-corner-primary', Messages.crowdfunding_popup_yes);
|
||||
var no = h('button.cp-corner-primary', Messages.crowdfunding_popup_no);
|
||||
var never = h('button.cp-corner-cancel', Messages.crowdfunding_popup_never);
|
||||
var actions = h('div', [yes, no, never]);
|
||||
|
||||
var modal = UI.cornerPopup(text, actions, null, {big: true});
|
||||
|
||||
$(yes).click(function () {
|
||||
modal.delete();
|
||||
common.openURL(priv.accounts.donateURL);
|
||||
Feedback.send('CROWDFUNDING_YES');
|
||||
});
|
||||
$(modal.popup).find('a').click(function (e) {
|
||||
e.stopPropagation();
|
||||
e.preventDefault();
|
||||
modal.delete();
|
||||
common.openURL(priv.accounts.donateURL);
|
||||
Feedback.send('CROWDFUNDING_LINK');
|
||||
});
|
||||
$(no).click(function () {
|
||||
modal.delete();
|
||||
Feedback.send('CROWDFUNDING_NO');
|
||||
});
|
||||
$(never).click(function () {
|
||||
modal.delete();
|
||||
common.setAttribute(['general', 'crowdfunding'], false);
|
||||
Feedback.send('CROWDFUNDING_NEVER');
|
||||
});
|
||||
});
|
||||
common.getAttribute(['general', 'crowdfunding'], function (err, val) {
|
||||
if (err || val === false) { return; }
|
||||
common.getSframeChannel().query('Q_GET_PINNED_USAGE', null, function (err, obj) {
|
||||
var quotaMb = obj.quota / (1024 * 1024);
|
||||
if (quotaMb < 10) { return; }
|
||||
todo();
|
||||
});
|
||||
}, 5000);
|
||||
});
|
||||
};
|
||||
|
||||
var storePopupState = false;
|
||||
|
@ -4247,7 +4262,7 @@ define([
|
|||
|
||||
var hide = h('button.cp-corner-cancel', Messages.autostore_hide);
|
||||
var store = h('button.cp-corner-primary', Messages.autostore_store);
|
||||
var actions = h('div', [store, hide]);
|
||||
var actions = h('div', [hide, store]);
|
||||
|
||||
var initialHide = data && data.autoStore && data.autoStore === -1;
|
||||
var modal = UI.cornerPopup(text, actions, footer, {hidden: initialHide});
|
||||
|
@ -4402,7 +4417,8 @@ define([
|
|||
|
||||
UIElements.displayFriendRequestModal = function (common, data) {
|
||||
var msg = data.content.msg;
|
||||
var text = Messages._getKey('contacts_request', [Util.fixHTML(msg.content.displayName)]);
|
||||
var userData = msg.content.user;
|
||||
var text = Messages._getKey('contacts_request', [Util.fixHTML(userData.displayName)]);
|
||||
|
||||
var todo = function (yes) {
|
||||
common.getSframeChannel().query("Q_ANSWER_FRIEND_REQUEST", {
|
||||
|
@ -4429,7 +4445,6 @@ define([
|
|||
|
||||
UIElements.displayAddOwnerModal = function (common, data) {
|
||||
var priv = common.getMetadataMgr().getPrivateData();
|
||||
var user = common.getMetadataMgr().getUserData();
|
||||
var sframeChan = common.getSframeChannel();
|
||||
var msg = data.content.msg;
|
||||
|
||||
|
@ -4464,15 +4479,7 @@ define([
|
|||
href: msg.content.href,
|
||||
password: msg.content.password,
|
||||
title: msg.content.title,
|
||||
answer: yes,
|
||||
user: {
|
||||
displayName: user.name,
|
||||
avatar: user.avatar,
|
||||
profile: user.profile,
|
||||
notifications: user.notifications,
|
||||
curvePublic: user.curvePublic,
|
||||
edPublic: priv.edPublic
|
||||
}
|
||||
answer: yes
|
||||
}, {
|
||||
channel: msg.content.user.notifications,
|
||||
curvePublic: msg.content.user.curvePublic
|
||||
|
@ -4553,7 +4560,6 @@ define([
|
|||
};
|
||||
UIElements.displayAddTeamOwnerModal = function (common, data) {
|
||||
var priv = common.getMetadataMgr().getPrivateData();
|
||||
var user = common.getMetadataMgr().getUserData();
|
||||
var sframeChan = common.getSframeChannel();
|
||||
var msg = data.content.msg;
|
||||
|
||||
|
@ -4570,15 +4576,7 @@ define([
|
|||
common.mailbox.sendTo("ADD_OWNER_ANSWER", {
|
||||
teamChannel: msg.content.teamChannel,
|
||||
title: msg.content.title,
|
||||
answer: yes,
|
||||
user: {
|
||||
displayName: user.name,
|
||||
avatar: user.avatar,
|
||||
profile: user.profile,
|
||||
notifications: user.notifications,
|
||||
curvePublic: user.curvePublic,
|
||||
edPublic: priv.edPublic
|
||||
}
|
||||
answer: yes
|
||||
}, {
|
||||
channel: msg.content.user.notifications,
|
||||
curvePublic: msg.content.user.curvePublic
|
||||
|
@ -4694,8 +4692,6 @@ define([
|
|||
};
|
||||
|
||||
UIElements.displayInviteTeamModal = function (common, data) {
|
||||
var priv = common.getMetadataMgr().getPrivateData();
|
||||
var user = common.getMetadataMgr().getUserData();
|
||||
var msg = data.content.msg;
|
||||
|
||||
var name = Util.fixHTML(msg.content.user.displayName) || Messages.anonymous;
|
||||
|
@ -4716,15 +4712,7 @@ define([
|
|||
common.mailbox.sendTo("INVITE_TO_TEAM_ANSWER", {
|
||||
answer: yes,
|
||||
teamChannel: msg.content.team.channel,
|
||||
teamName: teamName,
|
||||
user: {
|
||||
displayName: user.name,
|
||||
avatar: user.avatar,
|
||||
profile: user.profile,
|
||||
notifications: user.notifications,
|
||||
curvePublic: user.curvePublic,
|
||||
edPublic: priv.edPublic
|
||||
}
|
||||
teamName: teamName
|
||||
}, {
|
||||
channel: msg.content.user.notifications,
|
||||
curvePublic: msg.content.user.curvePublic
|
||||
|
|
|
@ -34,6 +34,9 @@
|
|||
};
|
||||
|
||||
Util.mkAsync = function (f) {
if (typeof(f) !== 'function') {
throw new Error('EXPECTED_FUNCTION');
}
return function () {
var args = Array.prototype.slice.call(arguments);
setTimeout(function () {
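The hunk is cut off here. Based on the visible lines and on how `Util.mkAsync` is used in storage/file.js (wrapping a callback so it always fires on a later tick), the remainder presumably just forwards the captured arguments; a sketch of that assumption:

Util.mkAsync = function (f) {
    if (typeof(f) !== 'function') {
        throw new Error('EXPECTED_FUNCTION');
    }
    return function () {
        var args = Array.prototype.slice.call(arguments);
        setTimeout(function () {
            // assumed: invoke the wrapped function asynchronously with the same arguments
            f.apply(null, args);
        });
    };
};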
@ -29,7 +29,9 @@ define([
|
|||
handlers['FRIEND_REQUEST'] = function (common, data) {
|
||||
var content = data.content;
|
||||
var msg = content.msg;
|
||||
var name = Util.fixHTML(msg.content.displayName) || Messages.anonymous;
|
||||
var userData = msg.content.user || msg.content;
|
||||
var name = Util.fixHTML(userData.displayName) || Messages.anonymous;
|
||||
msg.content = { user: userData };
|
||||
|
||||
// Display the notification
|
||||
content.getFormatText = function () {
|
||||
|
@ -37,7 +39,7 @@ define([
|
|||
};
|
||||
|
||||
// Check authenticity
|
||||
if (msg.author !== msg.content.curvePublic) { return; }
|
||||
if (msg.author !== userData.curvePublic) { return; }
|
||||
|
||||
// if not archived, add handlers
|
||||
if (!content.archived) {
|
||||
|
@ -51,7 +53,11 @@ define([
|
|||
handlers['FRIEND_REQUEST_ACCEPTED'] = function (common, data) {
|
||||
var content = data.content;
|
||||
var msg = content.msg;
|
||||
var name = Util.fixHTML(msg.content.name) || Messages.anonymous;
|
||||
var userData = typeof(msg.content.user) === "object" ? msg.content.user : {
|
||||
displayName: msg.content.name,
|
||||
curvePublic: msg.content.user
|
||||
};
|
||||
var name = Util.fixHTML(userData.displayName) || Messages.anonymous;
|
||||
content.getFormatText = function () {
|
||||
return Messages._getKey('friendRequest_accepted', [name]);
|
||||
};
|
||||
|
@ -63,7 +69,11 @@ define([
|
|||
handlers['FRIEND_REQUEST_DECLINED'] = function (common, data) {
|
||||
var content = data.content;
|
||||
var msg = content.msg;
|
||||
var name = Util.fixHTML(msg.content.name) || Messages.anonymous;
|
||||
var userData = typeof(msg.content.user) === "object" ? msg.content.user : {
|
||||
displayName: msg.content.name,
|
||||
curvePublic: msg.content.user
|
||||
};
|
||||
var name = Util.fixHTML(userData.displayName) || Messages.anonymous;
|
||||
content.getFormatText = function () {
|
||||
return Messages._getKey('friendRequest_declined', [name]);
|
||||
};
|
||||
|
|
|
@ -1606,17 +1606,10 @@ define([
|
|||
pinImages();
|
||||
};
|
||||
|
||||
config.onAbort = function () {
|
||||
// inform of network disconnect
|
||||
setEditable(false);
|
||||
toolbar.failed();
|
||||
UI.alert(Messages.common_connectionLost, undefined, true);
|
||||
};
|
||||
|
||||
config.onConnectionChange = function (info) {
|
||||
if (info.state) {
|
||||
// If we tried to send changes while we were offline, force a page reload
|
||||
UI.findOKButton().click();
|
||||
UIElements.reconnectAlert();
|
||||
if (Object.keys(pendingChanges).length) {
|
||||
return void UI.confirm(Messages.oo_reconnect, function (yes) {
|
||||
if (!yes) { return; }
|
||||
|
@ -1629,7 +1622,7 @@ define([
|
|||
setEditable(false);
|
||||
offline = true;
|
||||
UI.findOKButton().click();
|
||||
UI.alert(Messages.common_connectionLost, undefined, true);
|
||||
UIElements.disconnectAlert();
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -1260,15 +1260,15 @@ define([
|
|||
|
||||
// If we accept the request, add the friend to the list
|
||||
if (value) {
|
||||
Messaging.acceptFriendRequest(store, msg.content, function (obj) {
|
||||
Messaging.acceptFriendRequest(store, msg.content.user, function (obj) {
|
||||
if (obj && obj.error) { return void cb(obj); }
|
||||
Messaging.addToFriendList({
|
||||
proxy: store.proxy,
|
||||
realtime: store.realtime,
|
||||
pinPads: function (data, cb) { Store.pinPads(null, data, cb); },
|
||||
}, msg.content, function (err) {
|
||||
}, msg.content.user, function (err) {
|
||||
if (store.messenger) {
|
||||
store.messenger.onFriendAdded(msg.content);
|
||||
store.messenger.onFriendAdded(msg.content.user);
|
||||
}
|
||||
broadcast([], "UPDATE_METADATA");
|
||||
if (err) { return void cb({error: err}); }
|
||||
|
@ -1278,12 +1278,7 @@ define([
|
|||
return;
|
||||
}
|
||||
// Otherwise, just remove the notification
|
||||
store.mailbox.sendTo('DECLINE_FRIEND_REQUEST', {
|
||||
displayName: store.proxy['cryptpad.username']
|
||||
}, {
|
||||
channel: msg.content.notifications,
|
||||
curvePublic: msg.content.curvePublic
|
||||
}, function (obj) {
|
||||
Messaging.declineFriendRequest(store, msg.content.user, function (obj) {
|
||||
broadcast([], "UPDATE_METADATA");
|
||||
cb(obj);
|
||||
});
|
||||
|
@ -1305,8 +1300,9 @@ define([
|
|||
store.proxy.friends_pending[data.curvePublic] = +new Date();
|
||||
broadcast([], "UPDATE_METADATA");
|
||||
|
||||
var myData = Messaging.createData(store.proxy);
|
||||
store.mailbox.sendTo('FRIEND_REQUEST', myData, {
|
||||
store.mailbox.sendTo('FRIEND_REQUEST', {
|
||||
user: Messaging.createData(store.proxy)
|
||||
}, {
|
||||
channel: data.notifications,
|
||||
curvePublic: data.curvePublic
|
||||
}, function (obj) {
|
||||
|
@ -1642,11 +1638,8 @@ define([
|
|||
// If send is true, send the request to the owner.
|
||||
if (owner) {
|
||||
if (data.send) {
|
||||
var myData = Messaging.createData(store.proxy);
|
||||
delete myData.channel;
|
||||
store.mailbox.sendTo('REQUEST_PAD_ACCESS', {
|
||||
channel: data.channel,
|
||||
user: myData
|
||||
channel: data.channel
|
||||
}, {
|
||||
channel: owner.notifications,
|
||||
curvePublic: owner.curvePublic
|
||||
|
@ -1680,13 +1673,10 @@ define([
|
|||
}
|
||||
})) { return void cb({error: 'ENOTFOUND'}); }
|
||||
|
||||
var myData = Messaging.createData(store.proxy);
|
||||
delete myData.channel;
|
||||
store.mailbox.sendTo("GIVE_PAD_ACCESS", {
|
||||
channel: channel,
|
||||
href: href,
|
||||
title: title,
|
||||
user: myData
|
||||
title: title
|
||||
}, {
|
||||
channel: data.user.notifications,
|
||||
curvePublic: data.user.curvePublic
|
||||
|
@ -1720,13 +1710,11 @@ define([
|
|||
}
|
||||
// Tell all the owners that the pad was deleted from the server
|
||||
var curvePublic = store.proxy.curvePublic;
|
||||
var myData = Messaging.createData(store.proxy, false);
|
||||
m.forEach(function (obj) {
|
||||
var mb = JSON.parse(obj);
|
||||
if (mb.curvePublic === curvePublic) { return; }
|
||||
store.mailbox.sendTo('OWNED_PAD_REMOVED', {
|
||||
channel: channel,
|
||||
user: myData
|
||||
channel: channel
|
||||
}, {
|
||||
channel: mb.notifications,
|
||||
curvePublic: mb.curvePublic
|
||||
|
|
|
@ -4,6 +4,7 @@ define([
|
|||
'/common/common-util.js',
|
||||
], function (Messaging, Hash, Util) {
|
||||
|
||||
// Random timeout between 10 and 30 times your sync time (lag + chainpad sync)
var getRandomTimeout = function (ctx) {
var lag = ctx.store.realtime.getLag().lag || 0;
return (Math.max(0, lag) + 300) * 20 * (0.5 + Math.random());
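For a sense of scale of the formula above:

// lag =   0 ms  ->  (0 + 300) * 20 * [0.5, 1.5)    =  3,000 .. 9,000 ms
// lag = 200 ms  ->  (200 + 300) * 20 * [0.5, 1.5)  =  5,000 .. 15,000 ms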
@ -22,9 +23,11 @@ define([
|
|||
// Store the friend request displayed to avoid duplicates
|
||||
var friendRequest = {};
|
||||
handlers['FRIEND_REQUEST'] = function (ctx, box, data, cb) {
|
||||
// Old format: data was stored directly in "content"
|
||||
var userData = data.msg.content.user || data.msg.content;
|
||||
|
||||
// Check if the request is valid (sent by the correct user)
|
||||
if (data.msg.author !== data.msg.content.curvePublic) {
|
||||
if (data.msg.author !== userData.curvePublic) {
|
||||
return void cb(true);
|
||||
}
|
||||
|
||||
|
@ -40,7 +43,8 @@ define([
|
|||
if (Messaging.getFriend(ctx.store.proxy, data.msg.author) ||
|
||||
ctx.store.proxy.friends_pending[data.msg.author]) {
|
||||
delete ctx.store.proxy.friends_pending[data.msg.author];
|
||||
Messaging.acceptFriendRequest(ctx.store, data.msg.content, function (obj) {
|
||||
|
||||
Messaging.acceptFriendRequest(ctx.store, userData, function (obj) {
|
||||
if (obj && obj.error) {
|
||||
return void cb();
|
||||
}
|
||||
|
@ -48,10 +52,10 @@ define([
|
|||
proxy: ctx.store.proxy,
|
||||
realtime: ctx.store.realtime,
|
||||
pinPads: ctx.pinPads
|
||||
}, data.msg.content, function (err) {
|
||||
if (err) { console.error(err); }
|
||||
}, userData, function (err) {
|
||||
if (err) { return void console.error(err); }
|
||||
if (ctx.store.messenger) {
|
||||
ctx.store.messenger.onFriendAdded(data.msg.content);
|
||||
ctx.store.messenger.onFriendAdded(userData);
|
||||
}
|
||||
});
|
||||
ctx.updateMetadata();
|
||||
|
@ -63,96 +67,110 @@ define([
|
|||
cb();
|
||||
};
|
||||
removeHandlers['FRIEND_REQUEST'] = function (ctx, box, data) {
|
||||
if (friendRequest[data.content.curvePublic]) {
|
||||
delete friendRequest[data.content.curvePublic];
|
||||
var userData = data.content.user || data.content;
|
||||
if (friendRequest[userData.curvePublic]) {
|
||||
delete friendRequest[userData.curvePublic];
|
||||
}
|
||||
};
|
||||
|
||||
// The DECLINE and ACCEPT messages act on the contacts data
// They are processed with a random timeout to avoid having
// multiple workers trying to add or remove the contacts at
// the same time. Once processed, they are dismissed.
// We must dismiss them and send another message to our own
// mailbox for the UI part otherwise it would automatically
// accept or decline future requests from the same user
// until the message is manually dismissed.
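Both handlers below follow the same pattern; compressed into a sketch with placeholder names (`SOME_ANSWER`, `userData`) it looks roughly like this:

handlers['SOME_ANSWER'] = function (ctx, box, data, cb) {
    setTimeout(function () {
        cb(true); // dismiss inside the timeout so the data survives a worker shutdown
        // ...update the contacts / proxy state here...
        box.sendMessage({
            type: 'SOME_ANSWER_FOR_UI', // a separate message drives the UI notification
            content: { user: userData }
        }, function () {});
    }, getRandomTimeout(ctx));
};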
var friendRequestDeclined = {};
|
||||
handlers['DECLINE_FRIEND_REQUEST'] = function (ctx, box, data, cb) {
|
||||
setTimeout(function () {
|
||||
// Our friend request was declined.
|
||||
if (!ctx.store.proxy.friends_pending[data.msg.author]) { return; }
|
||||
// Old format: data was stored directly in "content"
|
||||
var userData = data.msg.content.user || data.msg.content;
|
||||
if (!userData.curvePublic) { userData.curvePublic = data.msg.author; }
|
||||
|
||||
// Our friend request was declined.
|
||||
setTimeout(function () {
|
||||
// Only dismissed once in the timeout to make sure we won't lose
|
||||
// the data if we close the worker before adding the friend
|
||||
cb(true);
|
||||
|
||||
// Make sure we really sent it
|
||||
if (!ctx.store.proxy.friends_pending[data.msg.author]) { return; }
|
||||
// Remove the pending message and display the "declined" state in the UI
|
||||
delete ctx.store.proxy.friends_pending[data.msg.author];
|
||||
|
||||
ctx.updateMetadata();
|
||||
if (friendRequestDeclined[data.msg.author]) { return; }
|
||||
friendRequestDeclined[data.msg.author] = true;
|
||||
box.sendMessage({
|
||||
type: 'FRIEND_REQUEST_DECLINED',
|
||||
content: {
|
||||
user: data.msg.author,
|
||||
name: data.msg.content.displayName
|
||||
}
|
||||
}, function () {
|
||||
if (friendRequestDeclined[data.msg.author]) {
|
||||
// TODO remove our message because another one was sent first?
|
||||
}
|
||||
friendRequestDeclined[data.msg.author] = true;
|
||||
});
|
||||
content: { user: userData }
|
||||
}, function () {});
|
||||
}, getRandomTimeout(ctx));
|
||||
cb(true);
|
||||
};
|
||||
// UI for declined friend request
|
||||
handlers['FRIEND_REQUEST_DECLINED'] = function (ctx, box, data, cb) {
|
||||
ctx.updateMetadata();
|
||||
if (friendRequestDeclined[data.msg.content.user]) { return void cb(true); }
|
||||
friendRequestDeclined[data.msg.content.user] = true;
|
||||
var curve = data.msg.content.user.curvePublic || data.msg.content.user;
|
||||
if (friendRequestDeclined[curve]) { return void cb(true); }
|
||||
friendRequestDeclined[curve] = true;
|
||||
cb();
|
||||
};
|
||||
removeHandlers['FRIEND_REQUEST_DECLINED'] = function (ctx, box, data) {
|
||||
if (friendRequestDeclined[data.content.user]) {
|
||||
delete friendRequestDeclined[data.content.user];
|
||||
}
|
||||
var curve = data.content.user.curvePublic || data.content.user;
|
||||
if (friendRequestDeclined[curve]) { delete friendRequestDeclined[curve]; }
|
||||
};
|
||||
|
||||
var friendRequestAccepted = {};
|
||||
handlers['ACCEPT_FRIEND_REQUEST'] = function (ctx, box, data, cb) {
|
||||
// Old format: data was stored directly in "content"
|
||||
var userData = data.msg.content.user || data.msg.content;
|
||||
|
||||
// Our friend request was accepted.
|
||||
setTimeout(function () {
|
||||
// Only dismissed once in the timeout to make sure we won't lose
|
||||
// the data if we close the worker before adding the friend
|
||||
cb(true);
|
||||
|
||||
// Make sure we really sent it
|
||||
if (!ctx.store.proxy.friends_pending[data.msg.author]) { return; }
|
||||
// Remove the pending state. It will also us to send a new request in case of error
|
||||
delete ctx.store.proxy.friends_pending[data.msg.author];
|
||||
|
||||
// And add the friend
|
||||
Messaging.addToFriendList({
|
||||
proxy: ctx.store.proxy,
|
||||
realtime: ctx.store.realtime,
|
||||
pinPads: ctx.pinPads
|
||||
}, data.msg.content, function (err) {
|
||||
if (err) { console.error(err); }
|
||||
delete ctx.store.proxy.friends_pending[data.msg.author];
|
||||
if (ctx.store.messenger) {
|
||||
ctx.store.messenger.onFriendAdded(data.msg.content);
|
||||
}
|
||||
}, userData, function (err) {
|
||||
if (err) { return void console.error(err); }
|
||||
// Load the chat if contacts app loaded
|
||||
if (ctx.store.messenger) { ctx.store.messenger.onFriendAdded(userData); }
|
||||
// Update the userlist
|
||||
ctx.updateMetadata();
|
||||
// If you have a profile page open, update it
|
||||
if (ctx.store.modules['profile']) { ctx.store.modules['profile'].update(); }
|
||||
if (friendRequestAccepted[data.msg.author]) { return; }
|
||||
// Display the "accepted" state in the UI
|
||||
if (friendRequestAccepted[data.msg.author]) { return; }
|
||||
friendRequestAccepted[data.msg.author] = true;
|
||||
box.sendMessage({
|
||||
type: 'FRIEND_REQUEST_ACCEPTED',
|
||||
content: {
|
||||
user: data.msg.author,
|
||||
name: data.msg.content.displayName
|
||||
}
|
||||
}, function () {
|
||||
if (friendRequestAccepted[data.msg.author]) {
|
||||
// TODO remove our message because another one was sent first?
|
||||
}
|
||||
friendRequestAccepted[data.msg.author] = true;
|
||||
});
|
||||
content: { user: userData }
|
||||
}, function () {});
|
||||
});
|
||||
}, getRandomTimeout(ctx));
|
||||
cb(true);
|
||||
};
|
||||
// UI for accepted friend request
|
||||
handlers['FRIEND_REQUEST_ACCEPTED'] = function (ctx, box, data, cb) {
|
||||
ctx.updateMetadata();
|
||||
if (friendRequestAccepted[data.msg.content.user]) { return void cb(true); }
|
||||
friendRequestAccepted[data.msg.content.user] = true;
|
||||
var curve = data.msg.content.user.curvePublic || data.msg.content.user;
|
||||
if (friendRequestAccepted[curve]) { return void cb(true); }
|
||||
friendRequestAccepted[curve] = true;
|
||||
cb();
|
||||
};
|
||||
removeHandlers['FRIEND_REQUEST_ACCEPTED'] = function (ctx, box, data) {
|
||||
if (friendRequestAccepted[data.content.user]) {
|
||||
delete friendRequestAccepted[data.content.user];
|
||||
}
|
||||
var curve = data.content.user.curvePublic || data.content.user;
|
||||
if (friendRequestAccepted[curve]) { delete friendRequestAccepted[curve]; }
|
||||
};
|
||||
|
||||
handlers['UNFRIEND'] = function (ctx, box, data, cb) {

@ -2,11 +2,12 @@ define([
'/common/common-util.js',
'/common/common-hash.js',
'/common/common-realtime.js',
'/common/common-messaging.js',
'/common/notify.js',
'/common/outer/mailbox-handlers.js',
'/bower_components/chainpad-netflux/chainpad-netflux.js',
'/bower_components/chainpad-crypto/crypto.js',
], function (Util, Hash, Realtime, Notify, Handlers, CpNetflux, Crypto) {
], function (Util, Hash, Realtime, Messaging, Notify, Handlers, CpNetflux, Crypto) {
var Mailbox = {};

var TYPES = [

@ -96,6 +97,12 @@ proxy.mailboxes = {
var crypto = Crypto.Mailbox.createEncryptor(keys);

// Always send your data
if (typeof(msg) === "object" && !msg.user) {
var myData = Messaging.createData(ctx.store.proxy, false);
msg.user = myData;
}
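
// With the block above, a mailbox message (before encryption) is assumed to
// serialize roughly as follows; the exact fields attached by
// Messaging.createData are not shown in this diff:
//   JSON.stringify({
//       type: 'FRIEND_REQUEST_ACCEPTED',
//       content: {
//           user: { curvePublic: '...', displayName: '...', notifications: '...' }
//       }
//   });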

var text = JSON.stringify({
type: type,
content: msg

@ -187,6 +194,11 @@ proxy.mailboxes = {
history: [], // All the hashes loaded from the server in correct order
content: {}, // Content of the messages that should be displayed
sendMessage: function (msg) { // To send a message to our box
// Always send your data
if (typeof(msg) === "object" && !msg.user) {
var myData = Messaging.createData(ctx.store.proxy, false);
msg.user = myData;
}
try {
msg = JSON.stringify(msg);
} catch (e) {

@ -893,7 +893,7 @@ define([
};

var clearOwnedChannel = function (ctx, id, cb) {
var channel = ctx.clients[id];
var channel = ctx.channels[id];
if (!channel) { return void cb({error: 'NO_CHANNEL'}); }
if (!ctx.store.rpc) { return void cb({error: 'RPC_NOT_READY'}); }
ctx.store.rpc.clearOwnedChannel(id, function (err) {

@ -909,13 +909,11 @@ define([
}));
}).nThen(function (waitFor) {
// Send mailbox to offer ownership
var myData = Messaging.createData(ctx.store.proxy, false);
ctx.store.mailbox.sendTo("ADD_OWNER", {
teamChannel: teamData.channel,
chatChannel: Util.find(teamData, ['keys', 'chat', 'channel']),
rosterChannel: Util.find(teamData, ['keys', 'roster', 'channel']),
title: teamData.metadata.name,
user: myData
title: teamData.metadata.name
}, {
channel: user.notifications,
curvePublic: user.curvePublic

@ -969,12 +967,10 @@ define([
}));
}).nThen(function (waitFor) {
// Send mailbox to offer ownership
var myData = Messaging.createData(ctx.store.proxy, false);
ctx.store.mailbox.sendTo("RM_OWNER", {
teamChannel: teamData.channel,
title: teamData.metadata.name,
pending: isPendingOwner,
user: myData
pending: isPendingOwner
}, {
channel: user.notifications,
curvePublic: user.curvePublic

@ -1104,11 +1100,9 @@ define([
if (!team) { return void cb ({error: 'ENOENT'}); }

// Send mailbox to offer ownership
var myData = Messaging.createData(ctx.store.proxy, false);
ctx.store.mailbox.sendTo("TEAM_EDIT_RIGHTS", {
state: state,
teamData: getInviteData(ctx, teamId, state),
user: myData
teamData: getInviteData(ctx, teamId, state)
}, {
channel: user.notifications,
curvePublic: user.curvePublic

@ -1175,7 +1169,6 @@ define([
team.roster.add(obj, function (err) {
if (err && err !== 'NO_CHANGE') { return void cb({error: err}); }
ctx.store.mailbox.sendTo('INVITE_TO_TEAM', {
user: Messaging.createData(ctx.store.proxy, false),
team: getInviteData(ctx, teamId)
}, {
channel: user.notifications,

@ -1202,7 +1195,6 @@ define([
if (!userData || !userData.notifications) { return cb(); }
ctx.store.mailbox.sendTo('KICKED_FROM_TEAM', {
pending: data.pending,
user: Messaging.createData(ctx.store.proxy, false),
teamChannel: getInviteData(ctx, teamId).channel,
teamName: getInviteData(ctx, teamId).metadata.name
}, {

@ -89,18 +89,6 @@ var factory = function (Util, Rpc) {
});
};

// get the total stored size of a channel's patches (in bytes)
exp.getFileSize = function (file, cb) {
rpc.send('GET_FILE_SIZE', file, function (e, response) {
if (e) { return void cb(e); }
if (response && response.length && typeof(response[0]) === 'number') {
return void cb(void 0, response[0]);
} else {
cb('INVALID_RESPONSE');
}
});
};

// get the combined size of all channels (in bytes) for all the
// channels which the server has pinned for your publicKey
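// A minimal usage sketch, assuming the rpc object returned by this factory
// and a Node-style (error, bytes) callback like getFileSize above:
//   rpc.getFileListSize(function (err, bytes) {
//       if (err) { return void console.error(err); }
//       console.log('total pinned size in bytes:', bytes);
//   });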
exp.getFileListSize = function (cb) {

@ -396,9 +396,9 @@ define([
if (state === STATE.DELETED) { return; }
stateChange(info.state ? STATE.INITIALIZING : STATE.DISCONNECTED, info.permanent);
/*if (info.state) {
UI.findOKButton().click();
UIElements.reconnectAlert();
} else {
UI.alert(Messages.common_connectionLost, undefined, true);
UIElements.disconnectAlert();
}*/
};

@ -62,15 +62,16 @@ define([
});

editor._noCursorUpdate = false;
editor.state.focused = true;
editor.scrollTo(scroll.left, scroll.top);

if (!editor.state.focused) { return; }

if(selects[0] === selects[1]) {
editor.setCursor(posToCursor(selects[0], remoteDoc));
}
else {
editor.setSelection(posToCursor(selects[0], remoteDoc), posToCursor(selects[1], remoteDoc));
}

editor.scrollTo(scroll.left, scroll.top);
};

module.getHeadingText = function (editor) {

@ -1,12 +1,13 @@
define([
'jquery',
'/common/common-util.js',
'/common/common-hash.js',
'/common/common-interface.js',
'/common/common-ui-elements.js',
'/common/notifications.js',
'/common/hyperscript.js',
'/customize/messages.js',
], function ($, Util, UI, UIElements, Notifications, h, Messages) {
], function ($, Util, Hash, UI, UIElements, Notifications, h, Messages) {
var Mailbox = {};

Mailbox.create = function (Common) {

@ -53,9 +54,23 @@ define([
};
var createElement = mailbox.createElement = function (data) {
var notif;
var avatar;
var userData = Util.find(data, ['content', 'msg', 'content', 'user']);
if (userData && typeof(userData) === "object" && userData.profile) {
avatar = h('span.cp-avatar');
Common.displayAvatar($(avatar), userData.avatar, userData.displayName || userData.name);
$(avatar).click(function (e) {
e.stopPropagation();
Common.openURL(Hash.hashToHref(userData.profile, 'profile'));
});
}
notif = h('div.cp-notification', {
'data-hash': data.content.hash
}, [h('div.cp-notification-content', h('p', formatData(data)))]);
}, [
avatar,
h('div.cp-notification-content',
h('p', formatData(data)))
]);

if (typeof(data.content.getFormatText) === "function") {
$(notif).find('.cp-notification-content p').html(data.content.getFormatText());

@ -83,6 +83,9 @@ define([
};

// UI
window.CryptPad_UI = UI;
window.CryptPad_UIElements = UIElements;
window.CryptPad_common = funcs;
funcs.createUserAdminMenu = callWithCommon(UIElements.createUserAdminMenu);
funcs.initFilePicker = callWithCommon(UIElements.initFilePicker);
funcs.openFilePicker = callWithCommon(UIElements.openFilePicker);

@ -5,6 +5,7 @@ define([
'/common/common-util.js',
'/common/common-hash.js',
'/common/common-interface.js',
'/common/common-ui-elements.js',
'/common/common-feedback.js',
'/bower_components/nthen/index.js',
'/common/sframe-common.js',

@ -22,6 +23,7 @@ define([
Util,
Hash,
UI,
UIElements,
Feedback,
nThen,
SFCommon,

@ -272,13 +274,13 @@ define([
setEditable(false);
if (drive.refresh) { drive.refresh(); }
APP.toolbar.failed();
if (!noAlert) { UI.alert(Messages.common_connectionLost, undefined, true); }
if (!noAlert) { UIElements.disconnectAlert(); }
};
var onReconnect = function () {
setEditable(true);
if (drive.refresh) { drive.refresh(); }
APP.toolbar.reconnecting();
UI.findOKButton().click();
UIElements.reconnectAlert();
};

sframeChan.on('EV_DRIVE_LOG', function (msg) {

@ -1,5 +1,6 @@
@import (reference) '../../customize/src/less2/include/framework.less';
@import (reference) '../../customize/src/less2/include/sidebar-layout.less';
@import (reference) '../../customize/src/less2/include/avatar.less';

&.cp-app-notifications {

@ -86,6 +87,14 @@
display: block;
}
}
.cp-avatar {
.avatar_main(48px);
padding: 0 10px;
cursor: pointer;
&:hover {
background-color: rgba(0,0,0,0.1);
}
}
&.cp-app-notification-archived {
background-color: #f1f1f1;
}

@ -741,7 +741,7 @@ define([
if (b64images.length && framework._.sfCommon.isLoggedIn()) {
var no = h('button.cp-corner-cancel', Messages.cancel);
var yes = h('button.cp-corner-primary', Messages.ok);
var actions = h('div', [yes, no]);
var actions = h('div', [no, yes]);
var modal = UI.cornerPopup(Messages.pad_base64, actions, '', {big: true});
$(no).click(function () {
modal.delete();

@ -13,6 +13,7 @@ define([
'/common/sframe-common-codemirror.js',
'/common/common-thumbnail.js',
'/common/common-interface.js',
'/common/common-ui-elements.js',
'/common/hyperscript.js',
'/customize/messages.js',
'cm/lib/codemirror',

@ -42,6 +43,7 @@ define([
SframeCM,
Thumb,
UI,
UIElements,
h,
Messages,
CMeditor,

@ -1098,13 +1100,13 @@ define([
});
}
setEditable(false);
//UI.alert(Messages.common_connectionLost, undefined, true);
//UIElements.disconnectAlert();
};

var onReconnect = function () {
if (APP.unrecoverable) { return; }
setEditable(true);
//UI.findOKButton().click();
//UIElements.reconnectAlert();
};

var getHeadingText = function () {

@ -570,6 +570,29 @@ define([
return;
}

if (!common.isLoggedIn()) {
var login = h('button.cp-corner-primary', Messages.login_login);
var register = h('button.cp-corner-primary', Messages.login_register);
var cancel = h('button.cp-corner-cancel', Messages.cancel);
var actions = h('div', [cancel, register, login]);
var modal = UI.cornerPopup(Messages.profile_login || "You need to log in to add this user to your contacts", actions, '', {alt: true}); // XXX
$(register).click(function () {
common.setLoginRedirect(function () {
common.gotoURL('/register/');
});
modal.delete();
});
$(login).click(function () {
common.setLoginRedirect(function () {
common.gotoURL('/login/');
});
modal.delete();
});
$(cancel).click(function () {
modal.delete();
});
}

var listmapConfig = {
data: {},
common: common,

@ -138,6 +138,7 @@
padding: 5px;
padding-left: 15px;
&[type="number"] {
height: @variables_input-height + 2px; // to avoid cropped numbers
border-right: 1px solid #adadad;
}
&[type="checkbox"] {

@ -1379,13 +1379,13 @@ define([
setEditable(false);
if (APP.team && driveAPP.refresh) { driveAPP.refresh(); }
toolbar.failed();
if (!noAlert) { UI.alert(Messages.common_connectionLost, undefined, true); }
if (!noAlert) { UIElements.disconnectAlert(); }
};
var onReconnect = function () {
setEditable(true);
if (APP.team && driveAPP.refresh) { driveAPP.refresh(); }
toolbar.reconnecting();
UI.findOKButton().click();
UIElements.reconnectAlert();
};

sframeChan.on('EV_DRIVE_LOG', function (msg) {