diff --git a/CHANGELOG.md b/CHANGELOG.md
index fcfdee9b1..41659802b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,42 @@
+# LabradorDuck release (3.11.0)
+
+## Goals
+
+* major server refactor to prepare for:
+  * trim-history
+  * allow lists
+
+## Update notes
+
+* dropped support for retainData
+  * archives are on by default
+* you will need an updated chainpad-server
+
+## Features
+
+* restyled corner popup
+* cool new scheduler library
+  * operations on channels are queued
+* trim-history rpc
+* unified historykeeper and rpc
+* more visible styles for unanswered support tickets
+* hidden hashes/safe links
+  * new "security" tab in settings
+* queued popups
+  * reconnect alert
+* link to user profile in notifications
+* prompt anonymous users to register when viewing a profile
+* spreadsheets
+  * reconnecting spreadsheets
+  * faster spreadsheets
+* don't hijack chat cursor
+* friends are now "contacts"
+
+## Bug fixes
+
+* friend request/accept race condition
+* throw errors in 'mkAsync' if no function is passed
+
 # Kouprey release (3.10.0)
 
 ## Goals
diff --git a/config/config.example.js b/config/config.example.js
index 9981c0626..90e96a66a 100644
--- a/config/config.example.js
+++ b/config/config.example.js
@@ -231,26 +231,17 @@ module.exports = {
      */
     inactiveTime: 90, // days

-    /* CryptPad can be configured to remove inactive data which has not been pinned.
-     * Deletion of data is always risky and as an operator you have the choice to
-     * archive data instead of deleting it outright. Set this value to true if
-     * you want your server to archive files and false if you want to keep using
-     * the old behaviour of simply removing files.
+    /* CryptPad archives some data instead of deleting it outright.
+     * This archived data still takes up space, so you'll probably want to
+     * remove these files after a brief period.
+     *
+     * cryptpad/scripts/evict-inactive.js is intended to be run daily
+     * from a crontab or similar scheduling service.
      *
-     * WARNING: this is not implemented universally, so at the moment this will
-     * only apply to the removal of 'channels' due to inactivity.
-     */
-    retainData: true,
-
-    /* As described above, CryptPad offers the ability to archive some data
-     * instead of deleting it outright. This archived data still takes up space
-     * and so you'll probably still want to remove these files after a brief period.
      * The intent with this feature is to provide a safety net in case of accidental
      * deletion. Set this value to the number of days you'd like to retain
      * archived data before it's removed permanently.
      *
-     * If 'retainData' is set to false, there will never be any archived data
-     * to remove.
*/ archiveRetentionTime: 15, diff --git a/customize.dist/pages.js b/customize.dist/pages.js index fc3ee6175..4b62ff785 100644 --- a/customize.dist/pages.js +++ b/customize.dist/pages.js @@ -107,7 +107,7 @@ define([ ])*/ ]) ]), - h('div.cp-version-footer', "CryptPad v3.10.0 (Kouprey)") + h('div.cp-version-footer', "CryptPad v3.11.0 (LabradorDuck)") ]); }; diff --git a/customize.dist/src/less2/include/buttons.less b/customize.dist/src/less2/include/buttons.less index ad6aaf9cc..8d906de62 100644 --- a/customize.dist/src/less2/include/buttons.less +++ b/customize.dist/src/less2/include/buttons.less @@ -10,7 +10,7 @@ @alertify-input-fg: @colortheme_modal-input-fg; input:not(.form-control), textarea { - background-color: @alertify-input-fg; + // background-color: @alertify-input-fg; color: @cryptpad_text_col; border: 1px solid @alertify-input-bg; width: 100%; @@ -23,6 +23,10 @@ } } + input:not(.form-control) { + height: @variables_input-height; + } + div.cp-alertify-type { display: flex; input { diff --git a/customize.dist/src/less2/include/colortheme.less b/customize.dist/src/less2/include/colortheme.less index 05a46c25d..709afa7c8 100644 --- a/customize.dist/src/less2/include/colortheme.less +++ b/customize.dist/src/less2/include/colortheme.less @@ -135,7 +135,7 @@ @colortheme_oocell-bg: #40865c; @colortheme_oocell-color: #FFF; -@colortheme_oocell-warn: #cd2532; +@colortheme_oocell-warn: #ffbcc0; @colortheme_kanban-bg: #8C4; @colortheme_kanban-color: #000; diff --git a/customize.dist/src/less2/include/corner.less b/customize.dist/src/less2/include/corner.less index 0740586aa..feec62165 100644 --- a/customize.dist/src/less2/include/corner.less +++ b/customize.dist/src/less2/include/corner.less @@ -4,9 +4,9 @@ --LessLoader_require: LessLoader_currentFile(); }; & { - @corner-button-ok: #2c9b00; - @corner-button-cancel: #990000; @corner-link: #ffff7a; + @corner-blue: @colortheme_logo-1; + @corner-white: @colortheme_base; @keyframes appear { 0% { @@ -27,21 +27,23 @@ .cp-corner-container { position: absolute; - right: 0; - bottom: 0; - width: 300px; - height: 200px; - border-top-left-radius: 200px; - padding: 15px; - text-align: right; - background-color: @colortheme_logo-1; - color: @colortheme_base; + right: 10px; + bottom: 10px; + width: 350px; + padding: 10px; + background-color: fade(@corner-blue, 95%); + border: 1px solid @corner-blue; + color: @corner-white; z-index: 9999; transform-origin: bottom right; animation: appear 0.8s ease-in-out; - box-shadow: 0 0 10px 0 @colortheme_logo-1; - //transform: scale(0.1); - //transform: scale(1); + //box-shadow: 0 0 10px 0 @corner-blue; + + &.cp-corner-alt { + background-color: fade(@corner-white, 95%); + border: 1px solid @corner-blue; + color: @corner-blue; + } h1, h2, h3 { font-size: 1.5em; @@ -64,7 +66,7 @@ line-height: 15px; display: none; &:hover { - color: darken(@colortheme_base, 15%); + color: darken(@corner-white, 15%); } } .cp-corner-minimize { @@ -86,46 +88,95 @@ } } &.cp-corner-big { - width: 400px; - height: 250px; + width: 500px; + } + + .cp-corner-dontshow { + cursor: pointer; + .fa { + margin-right: 0.3em; + font-size: 1.1em; + } + &:hover { + color: darken(@corner-white, 10%); + } + } + &.cp-corner-alt { + .cp-corner-dontshow { + &:hover { + color: lighten(@corner-blue, 10%); + } + } } .cp-corner-actions { min-height: 30px; - margin: 15px auto; - display: inline-block; + margin: 10px auto; + display: block; + text-align: right; } .cp-corner-footer { - font-style: italic; font-size: 0.8em; } .cp-corner-footer, .cp-corner-text { a { - 
color: @corner-link; + color: @corner-white; + text-decoration: underline; &:hover { - color: darken(@corner-link, 20%); + color: darken(@corner-white, 10%); } } } + &.cp-corner-alt a { + color: @corner-blue; + &:hover { + color: lighten(@corner-blue, 10%); + } + } button { - border: 0px; padding: 5px; - color: @colortheme_base; - margin-left: 5px; + color: @corner-white; + &:not(:first-child) { + margin-left: 10px; + } outline: none; + text-transform: uppercase; + border: 1px solid @corner-white; + .fa, .cptools { + margin-right: 0.3em; + } &.cp-corner-primary { - background-color: @corner-button-ok; - font-weight: bold; + background-color: @corner-white; + color: @corner-blue; &:hover { - background-color: lighten(@corner-button-ok, 10%); + background-color: lighten(@corner-blue, 50%); + border-color: lighten(@corner-blue, 50%); } } &.cp-corner-cancel { - background-color: @corner-button-cancel; - margin-left: 10px; + background-color: @corner-blue; + color: @corner-white; + &:hover { + background-color: darken(@corner-blue, 10%); + } + } + } + &.cp-corner-alt button { + border-color: @corner-blue; + &.cp-corner-primary { + background-color: @corner-blue; + color: @corner-white; + &:hover { + background-color: darken(@corner-blue, 10%); + border-color: darken(@corner-blue, 10%); + } + } + &.cp-corner-cancel { + background-color: @corner-white; + color: @corner-blue; &:hover { - background-color: lighten(@corner-button-cancel, 10%); + background-color: lighten(@corner-blue, 50%); } } } diff --git a/customize.dist/src/less2/include/fileupload.less b/customize.dist/src/less2/include/fileupload.less index ebe93399b..8fb1c8857 100644 --- a/customize.dist/src/less2/include/fileupload.less +++ b/customize.dist/src/less2/include/fileupload.less @@ -14,9 +14,11 @@ right: 10vw; bottom: 10vh; box-sizing: border-box; - z-index: 1000000; //Z file upload table container + z-index: 100000; //Z file upload table container display: none; color: darken(@colortheme_drive-bg, 10%); + max-height: 180px; + overflow-y: auto; @media screen and (max-width: @browser_media-medium-screen) { left: 5vw; right: 5vw; bottom: 5vw; @@ -26,6 +28,9 @@ display: flex; background-color: darken(@colortheme_modal-bg, 10%); font-weight: bold; + position: sticky; + top: 0; + z-index: 1; .cp-fileupload-header-title { padding: 0.25em 0.5em; flex-grow: 1; diff --git a/customize.dist/src/less2/include/notifications.less b/customize.dist/src/less2/include/notifications.less index 1e4430db2..a24ad32d3 100644 --- a/customize.dist/src/less2/include/notifications.less +++ b/customize.dist/src/less2/include/notifications.less @@ -8,6 +8,7 @@ @notif-height: 50px; .cp-notifications-container { max-width: 300px; + width: 300px; display: flex; flex-flow: column; & hr { @@ -16,6 +17,14 @@ .cp-notification { min-height: @notif-height; display: flex; + .cp-avatar { + .avatar_main(30px); + padding: 0 5px; + cursor: pointer; + &:hover { + background-color: rgba(0,0,0,0.1); + } + } .cp-notification-content { flex: 1; align-items: stretch; diff --git a/customize.dist/src/less2/include/sidebar-layout.less b/customize.dist/src/less2/include/sidebar-layout.less index 1f9c92457..7961e1bee 100644 --- a/customize.dist/src/less2/include/sidebar-layout.less +++ b/customize.dist/src/less2/include/sidebar-layout.less @@ -1,6 +1,7 @@ @import (reference) "/customize/src/less2/include/colortheme-all.less"; @import (reference) "/customize/src/less2/include/leftside-menu.less"; @import (reference) "/customize/src/less2/include/buttons.less"; +@import (reference) 
"/customize/src/less2/include/browser.less"; @sidebar_button-width: 400px; @@ -73,6 +74,7 @@ padding: 5px 20px; color: @rightside-color; overflow: auto; + padding-bottom: 200px; // Following rules are only in settings .cp-sidebarlayout-element { @@ -96,7 +98,7 @@ } } margin-bottom: 20px; - .buttons_main(); + .buttons_main(); } [type="text"], [type="password"], button { vertical-align: middle; @@ -107,6 +109,7 @@ .cp-sidebarlayout-input-block { display: inline-flex; width: @sidebar_button-width; + max-width: 100%; input { flex: 1; //border-radius: 0.25em 0 0 0.25em; @@ -117,6 +120,8 @@ //border-radius: 0 0.25em 0.25em 0; //border: 1px solid #adadad; border-left: 0px; + height: @variables_input-height; + margin: 0 !important; } } &>div { @@ -161,6 +166,25 @@ } */ } + @media screen and (max-width: @browser_media-medium-screen) { + flex-flow: column; + overflow: auto; + #cp-sidebarlayout-leftside { + width: 100% !important; // Override "narrow" mode + padding-bottom: 20px; + .cp-sidebarlayout-categories { + .cp-sidebarlayout-category { + margin: 0; + span.cp-sidebar-layout-category-name { + display: inline !important; // override "narrow" mode + } + } + } + } + #cp-sidebarlayout-rightside { + overflow: unset; + } + } } } diff --git a/customize.dist/src/less2/include/variables.less b/customize.dist/src/less2/include/variables.less index ba6c642e2..570779f05 100644 --- a/customize.dist/src/less2/include/variables.less +++ b/customize.dist/src/less2/include/variables.less @@ -3,6 +3,7 @@ // Elements size @variables_bar-height: 32px; +@variables_input-height: 38px; // Used in modal.less and alertify.less @variables_padding: 12px; diff --git a/docs/example.nginx.conf b/docs/example.nginx.conf index bef853d26..6d45b8198 100644 --- a/docs/example.nginx.conf +++ b/docs/example.nginx.conf @@ -106,7 +106,7 @@ server { if ($uri ~ ^\/common\/onlyoffice\/.*\/index\.html.*$) { set $unsafe 1; } # everything except the sandbox domain is a privileged scope, as they might be used to handle keys - if ($host != sandbox.cryptpad.info) { set $unsafe 0; } + if ($host != $sandbox_domain) { set $unsafe 0; } # privileged contexts allow a few more rights than unprivileged contexts, though limits are still applied if ($unsafe) { diff --git a/historyKeeper.js b/historyKeeper.js deleted file mode 100644 index fe16a204c..000000000 --- a/historyKeeper.js +++ /dev/null @@ -1,1021 +0,0 @@ -/* jshint esversion: 6 */ -/* global Buffer */ -;(function () { 'use strict'; - -const nThen = require('nthen'); -const Nacl = require('tweetnacl/nacl-fast'); -const Crypto = require('crypto'); -const Once = require("./lib/once"); -const Meta = require("./lib/metadata"); -const WriteQueue = require("./lib/write-queue"); -const BatchRead = require("./lib/batch-read"); - -let Log; -const now = function () { return (new Date()).getTime(); }; -const ONE_DAY = 1000 * 60 * 60 * 24; // one day in milliseconds - -/* getHash - * this function slices off the leading portion of a message which is - most likely unique - * these "hashes" are used to identify particular messages in a channel's history - * clients store "hashes" either in memory or in their drive to query for new messages: - * when reconnecting to a pad - * when connecting to chat or a mailbox - * thus, we can't change this function without invalidating client data which: - * is encrypted clientside - * can't be easily migrated - * don't break it! 
-*/ -const getHash = function (msg) { - if (typeof(msg) !== 'string') { - Log.warn('HK_GET_HASH', 'getHash() called on ' + typeof(msg) + ': ' + msg); - return ''; - } - return msg.slice(0,64); -}; - -const tryParse = function (str) { - try { - return JSON.parse(str); - } catch (err) { - Log.error('HK_PARSE_ERROR', err); - } -}; - -/* sliceCpIndex - returns a list of all checkpoints which might be relevant for a client connecting to a session - - * if there are two or fewer checkpoints, return everything you have - * if there are more than two - * return at least two - * plus any more which were received within the last 100 messages - - This is important because the additional history is what prevents - clients from forking on checkpoints and dropping forked history. - -*/ -const sliceCpIndex = function (cpIndex, line) { - // Remove "old" checkpoints (cp sent before 100 messages ago) - const minLine = Math.max(0, (line - 100)); - let start = cpIndex.slice(0, -2); - const end = cpIndex.slice(-2); - start = start.filter(function (obj) { - return obj.line > minLine; - }); - return start.concat(end); -}; - -const isMetadataMessage = function (parsed) { - return Boolean(parsed && parsed.channel); -}; - -// validateKeyStrings supplied by clients must decode to 32-byte Uint8Arrays -const isValidValidateKeyString = function (key) { - try { - return typeof(key) === 'string' && - Nacl.util.decodeBase64(key).length === Nacl.sign.publicKeyLength; - } catch (e) { - return false; - } -}; - -module.exports.create = function (cfg) { - const rpc = cfg.rpc; - const tasks = cfg.tasks; - const store = cfg.store; - const retainData = cfg.retainData; - Log = cfg.log; - - Log.silly('HK_LOADING', 'LOADING HISTORY_KEEPER MODULE'); - - const metadata_cache = {}; - const HISTORY_KEEPER_ID = Crypto.randomBytes(8).toString('hex'); - - Log.verbose('HK_ID', 'History keeper ID: ' + HISTORY_KEEPER_ID); - - let sendMsg = function () {}; - let STANDARD_CHANNEL_LENGTH, EPHEMERAL_CHANNEL_LENGTH; - const setConfig = function (config) { - STANDARD_CHANNEL_LENGTH = config.STANDARD_CHANNEL_LENGTH; - EPHEMERAL_CHANNEL_LENGTH = config.EPHEMERAL_CHANNEL_LENGTH; - sendMsg = config.sendMsg; - }; - - /* computeIndex - can call back with an error or a computed index which includes: - * cpIndex: - * array including any checkpoints pushed within the last 100 messages - * processed by 'sliceCpIndex(cpIndex, line)' - * offsetByHash: - * a map containing message offsets by their hash - * this is for every message in history, so it could be very large... - * except we remove offsets from the map if they occur before the oldest relevant checkpoint - * size: in bytes - * metadata: - * validationKey - * expiration time - * owners - * ??? 
(anything else we might add in the future) - * line - * the number of messages in history - * including the initial metadata line, if it exists - - */ - const computeIndex = function (channelName, cb) { - const cpIndex = []; - let messageBuf = []; - let metadata; - let i = 0; - - const ref = {}; - - const CB = Once(cb); - - const offsetByHash = {}; - let size = 0; - nThen(function (w) { - // iterate over all messages in the channel log - // old channels can contain metadata as the first message of the log - // remember metadata the first time you encounter it - // otherwise index important messages in the log - store.readMessagesBin(channelName, 0, (msgObj, readMore) => { - let msg; - // keep an eye out for the metadata line if you haven't already seen it - // but only check for metadata on the first line - if (!i && !metadata && msgObj.buff.indexOf('{') === 0) { - i++; // always increment the message counter - msg = tryParse(msgObj.buff.toString('utf8')); - if (typeof msg === "undefined") { return readMore(); } - - // validate that the current line really is metadata before storing it as such - if (isMetadataMessage(msg)) { - metadata = msg; - return readMore(); - } - } - i++; - if (msgObj.buff.indexOf('cp|') > -1) { - msg = msg || tryParse(msgObj.buff.toString('utf8')); - if (typeof msg === "undefined") { return readMore(); } - // cache the offsets of checkpoints if they can be parsed - if (msg[2] === 'MSG' && msg[4].indexOf('cp|') === 0) { - cpIndex.push({ - offset: msgObj.offset, - line: i - }); - // we only want to store messages since the latest checkpoint - // so clear the buffer every time you see a new one - messageBuf = []; - } - } - // if it's not metadata or a checkpoint then it should be a regular message - // store it in the buffer - messageBuf.push(msgObj); - return readMore(); - }, w((err) => { - if (err && err.code !== 'ENOENT') { - w.abort(); - return void CB(err); - } - - // once indexing is complete you should have a buffer of messages since the latest checkpoint - // map the 'hash' of each message to its byte offset in the log, to be used for reconnecting clients - messageBuf.forEach((msgObj) => { - const msg = tryParse(msgObj.buff.toString('utf8')); - if (typeof msg === "undefined") { return; } - if (msg[0] === 0 && msg[2] === 'MSG' && typeof(msg[4]) === 'string') { - // msgObj.offset is API guaranteed by our storage module - // it should always be a valid positive integer - offsetByHash[getHash(msg[4])] = msgObj.offset; - } - // There is a trailing \n at the end of the file - size = msgObj.offset + msgObj.buff.length + 1; - }); - })); - }).nThen(function (w) { - // create a function which will iterate over amendments to the metadata - const handler = Meta.createLineHandler(ref, Log.error); - - // initialize the accumulator in case there was a foundational metadata line in the log content - if (metadata) { handler(void 0, metadata); } - - // iterate over the dedicated metadata log (if it exists) - // proceed even in the event of a stream error on the metadata log - store.readDedicatedMetadata(channelName, handler, w(function (err) { - if (err) { - return void Log.error("DEDICATED_METADATA_ERROR", err); - } - })); - }).nThen(function () { - // when all is done, cache the metadata in memory - if (ref.index) { // but don't bother if no metadata was found... 
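As an aside, the shape of the computed index is easier to follow with a concrete value. A hypothetical result for a small channel, with illustrative numbers only (the fields are the ones documented above computeIndex):

```js
// hypothetical computeIndex() output for a channel of 12 messages,
// two of which were checkpoints (all values illustrative):
const exampleIndex = {
    cpIndex: [
        { offset: 1190, line: 5 },   // older checkpoint, still within the last 100 messages
        { offset: 3440, line: 11 }   // the most recent checkpoint
    ],
    offsetByHash: {
        // first 64 characters of each message since the oldest kept checkpoint
        "eXampleHash0000...": 3440,
        "eXampleHash0001...": 3892
    },
    size: 4511,     // total size of the channel log in bytes
    metadata: {     // validateKey, expiration time, owners, etc.
        channel: "exampleChannelId",
        validateKey: "exampleBase64Key"
    },
    line: 12        // number of messages, including the metadata line if present
};
```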
- metadata = metadata_cache[channelName] = ref.meta; - } - // and return the computed index - CB(null, { - // Only keep the checkpoints included in the last 100 messages - cpIndex: sliceCpIndex(cpIndex, i), - offsetByHash: offsetByHash, - size: size, - metadata: metadata, - line: i - }); - }); - }; - - /* getIndex - calls back with an error if anything goes wrong - or with a cached index for a channel if it exists - (along with metadata) - otherwise it calls back with the index computed by 'computeIndex' - - as an added bonus: - if the channel exists but its index does not then it caches the index - */ - const batchIndexReads = BatchRead("HK_GET_INDEX"); - const getIndex = (ctx, channelName, cb) => { - const chan = ctx.channels[channelName]; - // if there is a channel in memory and it has an index cached, return it - if (chan && chan.index) { - // enforce async behaviour - return void setTimeout(function () { - cb(undefined, chan.index); - }); - } - - batchIndexReads(channelName, cb, function (done) { - computeIndex(channelName, (err, ret) => { - // this is most likely an unrecoverable filesystem error - if (err) { return void done(err); } - // cache the computed result if possible - if (chan) { chan.index = ret; } - // return - done(void 0, ret); - }); - }); - }; - - /*:: - type cp_index_item = { - offset: number, - line: number - } - */ - - /* storeMessage - * ctx - * channel id - * the message to store - * whether the message is a checkpoint - * optionally the hash of the message - * it's not always used, but we guard against it - - - * async but doesn't have a callback - * source of a race condition whereby: - * two messaages can be inserted - * two offsets can be computed using the total size of all the messages - * but the offsets don't correspond to the actual location of the newlines - * because the two actions were performed like ABba... - * the fix is to use callbacks and implement queueing for writes - * to guarantee that offset computation is always atomic with writes - */ - const queueStorage = WriteQueue(); - - const storeMessage = function (ctx, channel, msg, isCp, optionalMessageHash) { - const id = channel.id; - - queueStorage(id, function (next) { - const msgBin = Buffer.from(msg + '\n', 'utf8'); - // Store the message first, and update the index only once it's stored. 
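The race described above, and the queueing that fixes it, can be sketched in isolation. This is a minimal illustration assuming the WriteQueue interface used here (a per-id task queue whose tasks receive a `next` callback); `appendToLog` and `updateIndex` are hypothetical stand-ins for `store.messageBin` and the index bookkeeping:

```js
const WriteQueue = require("./lib/write-queue");
const queueStorage = WriteQueue();

// Both writes are queued under the same channel id, so the second task
// cannot start until the first has called next(). The offset recorded in
// the index therefore always matches what is actually on disk.
queueStorage("exampleChannelId", function (next) {
    appendToLog("exampleChannelId", "message A\n", function () {
        updateIndex("message A\n"); // offset computation is atomic with the write
        next();                     // release the queue
    });
});
queueStorage("exampleChannelId", function (next) {
    appendToLog("exampleChannelId", "message B\n", function () {
        updateIndex("message B\n");
        next();
    });
});
```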
- // store.messageBin can be async so updating the index first may - // result in a wrong cpIndex - nThen((waitFor) => { - store.messageBin(id, msgBin, waitFor(function (err) { - if (err) { - waitFor.abort(); - Log.error("HK_STORE_MESSAGE_ERROR", err.message); - - // this error is critical, but there's not much we can do at the moment - // proceed with more messages, but they'll probably fail too - // at least you won't have a memory leak - - // TODO make it possible to respond to clients with errors so they know - // their message wasn't stored - return void next(); - } - })); - }).nThen((waitFor) => { - getIndex(ctx, id, waitFor((err, index) => { - if (err) { - Log.warn("HK_STORE_MESSAGE_INDEX", err.stack); - // non-critical, we'll be able to get the channel index later - return void next(); - } - if (typeof (index.line) === "number") { index.line++; } - if (isCp) { - index.cpIndex = sliceCpIndex(index.cpIndex, index.line || 0); - for (let k in index.offsetByHash) { - if (index.offsetByHash[k] < index.cpIndex[0]) { - delete index.offsetByHash[k]; - } - } - index.cpIndex.push(({ - offset: index.size, - line: ((index.line || 0) + 1) - } /*:cp_index_item*/)); - } - if (optionalMessageHash) { index.offsetByHash[optionalMessageHash] = index.size; } - index.size += msgBin.length; - - // handle the next element in the queue - next(); - })); - }); - }); - }; - - /* historyKeeperBroadcast - * uses API from the netflux server to send messages to every member of a channel - * sendMsg runs in a try-catch and drops users if sending a message fails - */ - const historyKeeperBroadcast = function (ctx, channel, msg) { - let chan = ctx.channels[channel] || (([] /*:any*/) /*:Chan_t*/); - chan.forEach(function (user) { - sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify(msg)]); - }); - }; - - /* expireChannel is here to clean up channels that should have been removed - but for some reason are still present - */ - const expireChannel = function (ctx, channel) { - if (retainData) { - return void store.archiveChannel(channel, function (err) { - Log.info("ARCHIVAL_CHANNEL_BY_HISTORY_KEEPER_EXPIRATION", { - channelId: channel, - status: err? String(err): "SUCCESS", - }); - }); - } - - store.removeChannel(channel, function (err) { - Log.info("DELETION_CHANNEL_BY_HISTORY_KEEPER_EXPIRATION", { - channelid: channel, - status: err? String(err): "SUCCESS", - }); - }); - }; - - /* checkExpired - * synchronously returns true or undefined to indicate whether the channel is expired - * according to its metadata - * has some side effects: - * closes the channel via the store.closeChannel API - * and then broadcasts to all channel members that the channel has expired - * removes the channel from the netflux-server's in-memory cache - * removes the channel metadata from history keeper's in-memory cache - - FIXME the boolean nature of this API should be separated from its side effects - */ - const checkExpired = function (ctx, channel) { - if (!(channel && channel.length === STANDARD_CHANNEL_LENGTH)) { return false; } - let metadata = metadata_cache[channel]; - if (!(metadata && typeof(metadata.expire) === 'number')) { return false; } - - // the number of milliseconds ago the channel should have expired - let pastDue = (+new Date()) - metadata.expire; - - // less than zero means that it hasn't expired yet - if (pastDue < 0) { return false; } - - // if it should have expired more than a day ago... 
- // there may have been a problem with scheduling tasks - // or the scheduled tasks may not be running - // so trigger a removal from here - if (pastDue >= ONE_DAY) { expireChannel(ctx, channel); } - - // close the channel - store.closeChannel(channel, function () { - historyKeeperBroadcast(ctx, channel, { - error: 'EEXPIRED', - channel: channel - }); - // remove it from any caches after you've told anyone in the channel - // that it has expired - delete ctx.channels[channel]; - delete metadata_cache[channel]; - }); - - // return true to indicate that it has expired - return true; - }; - - var CHECKPOINT_PATTERN = /^cp\|(([A-Za-z0-9+\/=]+)\|)?/; - - /* onChannelMessage - Determine what we should store when a message a broadcasted to a channel" - - * ignores ephemeral channels - * ignores messages sent to expired channels - * rejects duplicated checkpoints - * validates messages to channels that have validation keys - * caches the id of the last saved checkpoint - * adds timestamps to incoming messages - * writes messages to the store - */ - const onChannelMessage = function (ctx, channel, msgStruct) { - // TODO our usage of 'channel' here looks prone to errors - // we only use it for its 'id', but it can contain other stuff - // also, we're using this RPC from both the RPC and Netflux-server - // we should probably just change this to expect a channel id directly - - // don't store messages if the channel id indicates that it's an ephemeral message - if (!channel.id || channel.id.length === EPHEMERAL_CHANNEL_LENGTH) { return; } - - const isCp = /^cp\|/.test(msgStruct[4]); - let id; - if (isCp) { - // id becomes either null or an array or results... - id = CHECKPOINT_PATTERN.exec(msgStruct[4]); - if (Array.isArray(id) && id[2] && id[2] === channel.lastSavedCp) { - // Reject duplicate checkpoints - return; - } - } - - let metadata; - nThen(function (w) { - // getIndex (and therefore the latest metadata) - getIndex(ctx, channel.id, w(function (err, index) { - if (err) { - w.abort(); - return void Log.error('CHANNEL_MESSAGE_ERROR', err); - } - - if (!index.metadata) { - // if there's no channel metadata then it can't be an expiring channel - // nor can we possibly validate it - return; - } - - metadata = index.metadata; - - // don't write messages to expired channels - if (checkExpired(ctx, channel)) { return void w.abort(); } - - // if there's no validateKey present skip to the next block - if (!metadata.validateKey) { return; } - - // trim the checkpoint indicator off the message if it's present - let signedMsg = (isCp) ? msgStruct[4].replace(CHECKPOINT_PATTERN, '') : msgStruct[4]; - // convert the message from a base64 string into a Uint8Array - - // FIXME this can fail and the client won't notice - signedMsg = Nacl.util.decodeBase64(signedMsg); - - // FIXME this can blow up - // TODO check that that won't cause any problems other than not being able to append... - const validateKey = Nacl.util.decodeBase64(metadata.validateKey); - // validate the message - const validated = Nacl.sign.open(signedMsg, validateKey); - if (!validated) { - // don't go any further if the message fails validation - w.abort(); - Log.info("HK_SIGNED_MESSAGE_REJECTED", 'Channel '+channel.id); - return; - } - })); - }).nThen(function () { - // do checkpoint stuff... - - // 1. get the checkpoint id - // 2. reject duplicate checkpoints - - if (isCp) { - // if the message is a checkpoint we will have already validated - // that it isn't a duplicate. 
remember its id so that we can - // repeat this process for the next incoming checkpoint - - // WARNING: the fact that we only check the most recent checkpoints - // is a potential source of bugs if one editor has high latency and - // pushes a duplicate of an earlier checkpoint than the latest which - // has been pushed by editors with low latency - // FIXME - if (Array.isArray(id) && id[2]) { - // Store new checkpoint hash - channel.lastSavedCp = id[2]; - } - } - - // add the time to the message - msgStruct.push(now()); - - // storeMessage - storeMessage(ctx, channel, JSON.stringify(msgStruct), isCp, getHash(msgStruct[4])); - }); - }; - - /* dropChannel - * exported as API - * used by chainpad-server/NetfluxWebsocketSrv.js - * cleans up memory structures which are managed entirely by the historyKeeper - * the netflux server manages other memory in ctx.channels - */ - const dropChannel = function (chanName) { - delete metadata_cache[chanName]; - }; - - /* getHistoryOffset - returns a number representing the byte offset from the start of the log - for whatever history you're seeking. - - query by providing a 'lastKnownHash', - which is really just a string of the first 64 characters of an encrypted message. - OR by -1 which indicates that we want the full history (byte offset 0) - OR nothing, which indicates that you want whatever messages the historyKeeper deems relevant - (typically the last few checkpoints) - - this function embeds a lot of the history keeper's logic: - - 0. if you passed -1 as the lastKnownHash it means you want the complete history - * I'm not sure why you'd need to call this function if you know it will return 0 in this case... - * it has a side-effect of filling the index cache if it's empty - 1. if you provided a lastKnownHash and that message does not exist in the history: - * either the client has made a mistake or the history they knew about no longer exists - * call back with EINVAL - 2. if you did not provide a lastKnownHash - * and there are fewer than two checkpoints: - * return 0 (read from the start of the file) - * and there are two or more checkpoints: - * return the offset of the earliest checkpoint which 'sliceCpIndex' considers relevant - 3. if you did provide a lastKnownHash - * read through the log until you find the hash that you're looking for - * call back with either the byte offset of the message that you found OR - * -1 if you didn't find it - - */ - const getHistoryOffset = (ctx, channelName, lastKnownHash, cb /*:(e:?Error, os:?number)=>void*/) => { - // lastKnownhash === -1 means we want the complete history - if (lastKnownHash === -1) { return void cb(null, 0); } - let offset = -1; - nThen((waitFor) => { - getIndex(ctx, channelName, waitFor((err, index) => { - if (err) { waitFor.abort(); return void cb(err); } - - // check if the "hash" the client is requesting exists in the index - const lkh = index.offsetByHash[lastKnownHash]; - // we evict old hashes from the index as new checkpoints are discovered. - // if someone connects and asks for a hash that is no longer relevant, - // we tell them it's an invalid request. This is because of the semantics of "GET_HISTORY" - // which is only ever used when connecting or reconnecting in typical uses of history... - // this assumption should hold for uses by chainpad, but perhaps not for other uses cases. - // EXCEPT: other cases don't use checkpoints! 
- // clients that are told that their request is invalid should just make another request - // without specifying the hash, and just trust the server to give them the relevant data. - // QUESTION: does this mean mailboxes are causing the server to store too much stuff in memory? - if (lastKnownHash && typeof(lkh) !== "number") { - waitFor.abort(); - return void cb(new Error('EINVAL')); - } - - // Since last 2 checkpoints - if (!lastKnownHash) { - waitFor.abort(); - // Less than 2 checkpoints in the history: return everything - if (index.cpIndex.length < 2) { return void cb(null, 0); } - // Otherwise return the second last checkpoint's index - return void cb(null, index.cpIndex[0].offset); - /* LATER... - in practice, two checkpoints can be very close together - we have measures to avoid duplicate checkpoints, but editors - can produce nearby checkpoints which are slightly different, - and slip past these protections. To be really careful, we can - seek past nearby checkpoints by some number of patches so as - to ensure that all editors have sufficient knowledge of history - to reconcile their differences. */ - } - - offset = lkh; - })); - }).nThen((waitFor) => { - // if offset is less than zero then presumably the channel has no messages - // returning falls through to the next block and therefore returns -1 - if (offset !== -1) { return; } - - // do a lookup from the index - // FIXME maybe we don't need this anymore? - // otherwise we have a non-negative offset and we can start to read from there - store.readMessagesBin(channelName, 0, (msgObj, readMore, abort) => { - // tryParse return a parsed message or undefined - const msg = tryParse(msgObj.buff.toString('utf8')); - // if it was undefined then go onto the next message - if (typeof msg === "undefined") { return readMore(); } - if (typeof(msg[4]) !== 'string' || lastKnownHash !== getHash(msg[4])) { - return void readMore(); - } - offset = msgObj.offset; - abort(); - }, waitFor(function (err) { - if (err) { waitFor.abort(); return void cb(err); } - })); - }).nThen(() => { - cb(null, offset); - }); - }; - - /* getHistoryAsync - * finds the appropriate byte offset from which to begin reading using 'getHistoryOffset' - * streams through the rest of the messages, safely parsing them and returning the parsed content to the handler - * calls back when it has reached the end of the log - - Used by: - * GET_HISTORY - - */ - const getHistoryAsync = (ctx, channelName, lastKnownHash, beforeHash, handler, cb) => { - let offset = -1; - nThen((waitFor) => { - getHistoryOffset(ctx, channelName, lastKnownHash, waitFor((err, os) => { - if (err) { - waitFor.abort(); - return void cb(err); - } - offset = os; - })); - }).nThen((waitFor) => { - if (offset === -1) { return void cb(new Error("could not find offset")); } - const start = (beforeHash) ? 
0 : offset; - store.readMessagesBin(channelName, start, (msgObj, readMore, abort) => { - if (beforeHash && msgObj.offset >= offset) { return void abort(); } - handler(tryParse(msgObj.buff.toString('utf8')), readMore); - }, waitFor(function (err) { - return void cb(err); - })); - }); - }; - - /* getOlderHistory - * allows clients to query for all messages until a known hash is read - * stores all messages in history as they are read - * can therefore be very expensive for memory - * should probably be converted to a streaming interface - - Used by: - * GET_HISTORY_RANGE - */ - const getOlderHistory = function (channelName, oldestKnownHash, cb) { - var messageBuffer = []; - var found = false; - store.getMessages(channelName, function (msgStr) { - if (found) { return; } - - let parsed = tryParse(msgStr); - if (typeof parsed === "undefined") { return; } - - // identify classic metadata messages by their inclusion of a channel. - // and don't send metadata, since: - // 1. the user won't be interested in it - // 2. this metadata is potentially incomplete/incorrect - if (isMetadataMessage(parsed)) { return; } - - var content = parsed[4]; - if (typeof(content) !== 'string') { return; } - - var hash = getHash(content); - if (hash === oldestKnownHash) { - found = true; - } - messageBuffer.push(parsed); - }, function (err) { - if (err) { - Log.error("HK_GET_OLDER_HISTORY", err); - } - cb(messageBuffer); - }); - }; - - - /* onChannelCleared - * broadcasts to all clients in a channel if that channel is deleted - */ - const onChannelCleared = function (ctx, channel) { - historyKeeperBroadcast(ctx, channel, { - error: 'ECLEARED', - channel: channel - }); - }; - // When a channel is removed from datastore, broadcast a message to all its connected users - const onChannelDeleted = function (ctx, channel) { - store.closeChannel(channel, function () { - historyKeeperBroadcast(ctx, channel, { - error: 'EDELETED', - channel: channel - }); - }); - delete ctx.channels[channel]; - delete metadata_cache[channel]; - }; - // Check if the selected channel is expired - // If it is, remove it from memory and broadcast a message to its members - - const onChannelMetadataChanged = function (ctx, channel, metadata) { - if (channel && metadata_cache[channel] && typeof (metadata) === "object") { - Log.silly('SET_METADATA_CACHE', 'Channel '+ channel +', metadata: '+ JSON.stringify(metadata)); - metadata_cache[channel] = metadata; - if (ctx.channels[channel] && ctx.channels[channel].index) { - ctx.channels[channel].index.metadata = metadata; - } - historyKeeperBroadcast(ctx, channel, metadata); - } - }; - - const handleGetHistory = function (ctx, seq, user, parsed) { - // parsed[1] is the channel id - // parsed[2] is a validation key or an object containing metadata (optionnal) - // parsed[3] is the last known hash (optionnal) - sendMsg(ctx, user, [seq, 'ACK']); - var channelName = parsed[1]; - var config = parsed[2]; - var metadata = {}; - var lastKnownHash; - - // clients can optionally pass a map of attributes - // if the channel already exists this map will be ignored - // otherwise it will be stored as the initial metadata state for the channel - if (config && typeof config === "object" && !Array.isArray(parsed[2])) { - lastKnownHash = config.lastKnownHash; - metadata = config.metadata || {}; - if (metadata.expire) { - metadata.expire = +metadata.expire * 1000 + (+new Date()); - } - } - metadata.channel = channelName; - metadata.created = +new Date(); - - // if the user sends us an invalid key, we won't be able to 
validate their messages - // so they'll never get written to the log anyway. Let's just drop their message - // on the floor instead of doing a bunch of extra work - // TODO send them an error message so they know something is wrong - if (metadata.validateKey && !isValidValidateKeyString(metadata.validateKey)) { - return void Log.error('HK_INVALID_KEY', metadata.validateKey); - } - - nThen(function (waitFor) { - var w = waitFor(); - - /* unless this is a young channel, we will serve all messages from an offset - this will not include the channel metadata, so we need to explicitly fetch that. - unfortunately, we can't just serve it blindly, since then young channels will - send the metadata twice, so let's do a quick check of what we're going to serve... - */ - getIndex(ctx, channelName, waitFor((err, index) => { - /* if there's an error here, it should be encountered - and handled by the next nThen block. - so, let's just fall through... - */ - if (err) { return w(); } - - - // it's possible that the channel doesn't have metadata - // but in that case there's no point in checking if the channel expired - // or in trying to send metadata, so just skip this block - if (!index || !index.metadata) { return void w(); } - // And then check if the channel is expired. If it is, send the error and abort - // FIXME this is hard to read because 'checkExpired' has side effects - if (checkExpired(ctx, channelName)) { return void waitFor.abort(); } - // always send metadata with GET_HISTORY requests - sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify(index.metadata)], w); - })); - }).nThen(() => { - let msgCount = 0; - - // TODO compute lastKnownHash in a manner such that it will always skip past the metadata line? - getHistoryAsync(ctx, channelName, lastKnownHash, false, (msg, readMore) => { - if (!msg) { return; } - msgCount++; - // avoid sending the metadata message a second time - if (isMetadataMessage(msg) && metadata_cache[channelName]) { return readMore(); } - sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify(msg)], readMore); - }, (err) => { - if (err && err.code !== 'ENOENT') { - if (err.message !== 'EINVAL') { Log.error("HK_GET_HISTORY", err); } - const parsedMsg = {error:err.message, channel: channelName}; - sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify(parsedMsg)]); - return; - } - - const chan = ctx.channels[channelName]; - - if (msgCount === 0 && !metadata_cache[channelName] && chan && chan.indexOf(user) > -1) { - metadata_cache[channelName] = metadata; - - // the index will have already been constructed and cached at this point - // but it will not have detected any metadata because it hasn't been written yet - // this means that the cache starts off as invalid, so we have to correct it - if (chan && chan.index) { chan.index.metadata = metadata; } - - // new channels will always have their metadata written to a dedicated metadata log - // but any lines after the first which are not amendments in a particular format will be ignored. - // Thus we should be safe from race conditions here if just write metadata to the log as below... - // TODO validate this logic - // otherwise maybe we need to check that the metadata log is empty as well - store.writeMetadata(channelName, JSON.stringify(metadata), function (err) { - if (err) { - // FIXME tell the user that there was a channel error? 
- return void Log.error('HK_WRITE_METADATA', { - channel: channelName, - error: err, - }); - } - }); - - // write tasks - if(tasks && metadata.expire && typeof(metadata.expire) === 'number') { - // the fun part... - // the user has said they want this pad to expire at some point - tasks.write(metadata.expire, "EXPIRE", [ channelName ], function (err) { - if (err) { - // if there is an error, we don't want to crash the whole server... - // just log it, and if there's a problem you'll be able to fix it - // at a later date with the provided information - Log.error('HK_CREATE_EXPIRE_TASK', err); - Log.info('HK_INVALID_EXPIRE_TASK', JSON.stringify([metadata.expire, 'EXPIRE', channelName])); - } - }); - } - sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify(metadata)]); - } - - // End of history message: - let parsedMsg = {state: 1, channel: channelName}; - sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify(parsedMsg)]); - }); - }); - }; - - const handleGetHistoryRange = function (ctx, seq, user, parsed) { - var channelName = parsed[1]; - var map = parsed[2]; - if (!(map && typeof(map) === 'object')) { - return void sendMsg(ctx, user, [seq, 'ERROR', 'INVALID_ARGS', HISTORY_KEEPER_ID]); - } - - var oldestKnownHash = map.from; - var desiredMessages = map.count; - var desiredCheckpoint = map.cpCount; - var txid = map.txid; - if (typeof(desiredMessages) !== 'number' && typeof(desiredCheckpoint) !== 'number') { - return void sendMsg(ctx, user, [seq, 'ERROR', 'UNSPECIFIED_COUNT', HISTORY_KEEPER_ID]); - } - - if (!txid) { - return void sendMsg(ctx, user, [seq, 'ERROR', 'NO_TXID', HISTORY_KEEPER_ID]); - } - - sendMsg(ctx, user, [seq, 'ACK']); - return void getOlderHistory(channelName, oldestKnownHash, function (messages) { - var toSend = []; - if (typeof (desiredMessages) === "number") { - toSend = messages.slice(-desiredMessages); - } else { - let cpCount = 0; - for (var i = messages.length - 1; i >= 0; i--) { - if (/^cp\|/.test(messages[i][4]) && i !== (messages.length - 1)) { - cpCount++; - } - toSend.unshift(messages[i]); - if (cpCount >= desiredCheckpoint) { break; } - } - } - toSend.forEach(function (msg) { - sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, - JSON.stringify(['HISTORY_RANGE', txid, msg])]); - }); - - sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, - JSON.stringify(['HISTORY_RANGE_END', txid, channelName]) - ]); - }); - }; - - const handleGetFullHistory = function (ctx, seq, user, parsed) { - // parsed[1] is the channel id - // parsed[2] is a validation key (optionnal) - // parsed[3] is the last known hash (optionnal) - sendMsg(ctx, user, [seq, 'ACK']); - - // FIXME should we send metadata here too? - // none of the clientside code which uses this API needs metadata, but it won't hurt to send it (2019-08-22) - return void getHistoryAsync(ctx, parsed[1], -1, false, (msg, readMore) => { - if (!msg) { return; } - sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify(['FULL_HISTORY', msg])], readMore); - }, (err) => { - let parsedMsg = ['FULL_HISTORY_END', parsed[1]]; - if (err) { - Log.error('HK_GET_FULL_HISTORY', err.stack); - parsedMsg = ['ERROR', parsed[1], err.message]; - } - sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify(parsedMsg)]); - }); - }; - - const handleRPC = function (ctx, seq, user, parsed) { - if (typeof(rpc) !== 'function') { return; } - - /* RPC Calls... 
*/ - var rpc_call = parsed.slice(1); - - sendMsg(ctx, user, [seq, 'ACK']); - try { - // slice off the sequence number and pass in the rest of the message - rpc(ctx, rpc_call, function (err, output) { - if (err) { - sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify([parsed[0], 'ERROR', err])]); - return; - } - var msg = rpc_call[0].slice(); - if (msg[3] === 'REMOVE_OWNED_CHANNEL') { - onChannelDeleted(ctx, msg[4]); - } - if (msg[3] === 'CLEAR_OWNED_CHANNEL') { - onChannelCleared(ctx, msg[4]); - } - - if (msg[3] === 'SET_METADATA') { // or whatever we call the RPC???? - // make sure we update our cache of metadata - // or at least invalidate it and force other mechanisms to recompute its state - // 'output' could be the new state as computed by rpc - onChannelMetadataChanged(ctx, msg[4].channel, output[1]); - } - - // unauthenticated RPC calls have a different message format - if (msg[0] === "WRITE_PRIVATE_MESSAGE" && output && output.channel) { - // this is an inline reimplementation of historyKeeperBroadcast - // because if we use that directly it will bypass signature validation - // which opens up the user to malicious behaviour - let chan = ctx.channels[output.channel]; - if (chan && chan.length) { - chan.forEach(function (user) { - sendMsg(ctx, user, output.message); - //[0, null, 'MSG', user.id, JSON.stringify(output.message)]); - }); - } - // rpc and anonRpc expect their responses to be of a certain length - // and we've already used the output of the rpc call, so overwrite it - output = [null, null, null]; - } - - // finally, send a response to the client that sent the RPC - sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify([parsed[0]].concat(output))]); - }); - } catch (e) { - sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify([parsed[0], 'ERROR', 'SERVER_ERROR'])]); - } - }; - - /* onDirectMessage - * exported for use by the netflux-server - * parses and handles all direct messages directed to the history keeper - * check if it's expired and execute all the associated side-effects - * routes queries to the appropriate handlers - */ - const onDirectMessage = function (ctx, seq, user, json) { - Log.silly('HK_MESSAGE', json); - - let parsed; - try { - parsed = JSON.parse(json[2]); - } catch (err) { - Log.error("HK_PARSE_CLIENT_MESSAGE", json); - return; - } - - // If the requested history is for an expired channel, abort - // Note the if we don't have the keys for that channel in metadata_cache, we'll - // have to abort later (once we know the expiration time) - if (checkExpired(ctx, parsed[1])) { return; } - - if (parsed[0] === 'GET_HISTORY') { - return void handleGetHistory(ctx, seq, user, parsed); - } - if (parsed[0] === 'GET_HISTORY_RANGE') { - return void handleGetHistoryRange(ctx, seq, user, parsed); - } - if (parsed[0] === 'GET_FULL_HISTORY') { - return void handleGetFullHistory(ctx, seq, user, parsed); - } - return void handleRPC(ctx, seq, user, parsed); - }; - - return { - id: HISTORY_KEEPER_ID, - setConfig: setConfig, - onChannelMessage: onChannelMessage, - dropChannel: dropChannel, - checkExpired: checkExpired, - onDirectMessage: onDirectMessage, - }; -}; - -}()); diff --git a/lib/api.js b/lib/api.js new file mode 100644 index 000000000..bd5c99629 --- /dev/null +++ b/lib/api.js @@ -0,0 +1,48 @@ +/* jshint esversion: 6 */ +const WebSocketServer = require('ws').Server; +const NetfluxSrv = require('chainpad-server'); + +module.exports.create = function (config) { + // asynchronously create a historyKeeper and 
RPC together + require('./historyKeeper.js').create(config, function (err, historyKeeper) { + if (err) { throw err; } + + var log = config.log; + + // spawn ws server and attach netflux event handlers + NetfluxSrv.create(new WebSocketServer({ server: config.httpServer})) + .on('channelClose', historyKeeper.channelClose) + .on('channelMessage', historyKeeper.channelMessage) + .on('channelOpen', historyKeeper.channelOpen) + .on('sessionClose', function (userId, reason) { + if (['BAD_MESSAGE', 'SOCKET_ERROR', 'SEND_MESSAGE_FAIL_2'].indexOf(reason) !== -1) { + if (reason && reason.code === 'ECONNRESET') { return; } + return void log.error('SESSION_CLOSE_WITH_ERROR', { + userId: userId, + reason: reason, + }); + } + + if (reason && reason === 'SOCKET_CLOSED') { return; } + log.verbose('SESSION_CLOSE_ROUTINE', { + userId: userId, + reason: reason, + }); + }) + .on('error', function (error, label, info) { + if (!error) { return; } + /* labels: + SEND_MESSAGE_FAIL, SEND_MESSAGE_FAIL_2, FAIL_TO_DISCONNECT, + FAIL_TO_TERMINATE, HANDLE_CHANNEL_LEAVE, NETFLUX_BAD_MESSAGE, + NETFLUX_WEBSOCKET_ERROR + */ + log.error(label, { + code: error.code, + message: error.message, + stack: error.stack, + info: info, + }); + }) + .register(historyKeeper.id, historyKeeper.directMessage); + }); +}; diff --git a/lib/commands/admin-rpc.js b/lib/commands/admin-rpc.js new file mode 100644 index 000000000..a3bb071ad --- /dev/null +++ b/lib/commands/admin-rpc.js @@ -0,0 +1,122 @@ +/*jshint esversion: 6 */ +const BatchRead = require("../batch-read"); +const nThen = require("nthen"); +const getFolderSize = require("get-folder-size"); +//const Util = require("../common-util"); + +var Fs = require("fs"); + +var Admin = module.exports; + +var getActiveSessions = function (Env, Server, cb) { + var stats = Server.getSessionStats(); + cb(void 0, [ + stats.total, + stats.unique + ]); +}; + +var shutdown = function (Env, Server, cb) { + if (true) { + return void cb('E_NOT_IMPLEMENTED'); + } + + // disconnect all users and reject new connections + Server.shutdown(); + + // stop all intervals that may be running + Object.keys(Env.intervals).forEach(function (name) { + clearInterval(Env.intervals[name]); + }); + + // set a flag to prevent incoming database writes + // wait until all pending writes are complete + // then process.exit(0); + // and allow system functionality to restart the server +}; + +const batchRegisteredUsers = BatchRead("GET_REGISTERED_USERS"); +var getRegisteredUsers = function (Env, cb) { + batchRegisteredUsers('', cb, function (done) { + var dir = Env.paths.pin; + var folders; + var users = 0; + nThen(function (waitFor) { + Fs.readdir(dir, waitFor(function (err, list) { + if (err) { + waitFor.abort(); + return void done(err); + } + folders = list; + })); + }).nThen(function (waitFor) { + folders.forEach(function (f) { + var dir = Env.paths.pin + '/' + f; + Fs.readdir(dir, waitFor(function (err, list) { + if (err) { return; } + users += list.length; + })); + }); + }).nThen(function () { + done(void 0, users); + }); + }); +}; + +const batchDiskUsage = BatchRead("GET_DISK_USAGE"); +var getDiskUsage = function (Env, cb) { + batchDiskUsage('', cb, function (done) { + var data = {}; + nThen(function (waitFor) { + getFolderSize('./', waitFor(function(err, info) { + data.total = info; + })); + getFolderSize(Env.paths.pin, waitFor(function(err, info) { + data.pin = info; + })); + getFolderSize(Env.paths.blob, waitFor(function(err, info) { + data.blob = info; + })); + getFolderSize(Env.paths.staging, waitFor(function(err, 
info) { + data.blobstage = info; + })); + getFolderSize(Env.paths.block, waitFor(function(err, info) { + data.block = info; + })); + getFolderSize(Env.paths.data, waitFor(function(err, info) { + data.datastore = info; + })); + }).nThen(function () { + done(void 0, data); + }); + }); +}; + +Admin.command = function (Env, safeKey, data, cb, Server) { + var admins = Env.admins; + //var unsafeKey = Util.unescapeKeyCharacters(safeKey); + if (admins.indexOf(safeKey) === -1) { + return void cb("FORBIDDEN"); + } + + // Handle commands here + switch (data[0]) { + case 'ACTIVE_SESSIONS': + return getActiveSessions(Env, Server, cb); + case 'ACTIVE_PADS': + return cb(void 0, Server.getActiveChannelCount()); + case 'REGISTERED_USERS': + return getRegisteredUsers(Env, cb); + case 'DISK_USAGE': + return getDiskUsage(Env, cb); + case 'FLUSH_CACHE': + Env.flushCache(); + return cb(void 0, true); + case 'SHUTDOWN': + return shutdown(Env, Server, cb); + default: + return cb('UNHANDLED_ADMIN_COMMAND'); + } +}; + + diff --git a/lib/commands/block.js b/lib/commands/block.js new file mode 100644 index 000000000..3a264c167 --- /dev/null +++ b/lib/commands/block.js @@ -0,0 +1,172 @@ +/*jshint esversion: 6 */ +/* globals Buffer*/ +var Block = module.exports; + +const Fs = require("fs"); +const Fse = require("fs-extra"); +const Path = require("path"); +const Nacl = require("tweetnacl/nacl-fast"); +const nThen = require("nthen"); + +const Util = require("../common-util"); + +/* + We assume that the server is secured against MitM attacks + via HTTPS, and that malicious actors do not have code execution + capabilities. If they do, we have much more serious problems. + + The capability to replay a block write or remove results in either + a denial of service for the user whose block was removed, or in the + case of a write, a rollback to an earlier password. + + Since block modification is destructive, this can result in loss + of access to the user's drive. + + So long as the detached signature is never observed by a malicious + party, and the server discards it after proof of knowledge, replays + are not possible. However, this precludes verification of the signature + at a later time. + + Despite this, an integrity check is still possible by the original + author of the block, since we assume that the block will have been + encrypted with xsalsa20-poly1305 which is authenticated. 
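To make the verification below concrete, here is a sketch of what a client is assumed to do when writing a block. It mirrors validateLoginBlock() using the same tweetnacl API; the keypair and block contents are hypothetical:

```js
const Nacl = require("tweetnacl/nacl-fast");

const keypair = Nacl.sign.keyPair();  // hypothetical: the user's signing keypair
const block = Nacl.randomBytes(128);  // stands in for the xsalsa20-poly1305 ciphertext

// sign the hash of the block rather than the block itself, matching the
// server's Nacl.sign.detached.verify(Nacl.hash(u8_block), ...) check
const signature = Nacl.sign.detached(Nacl.hash(block), keypair.secretKey);

// the server expects all three values base64-encoded: [publicKey, signature, block]
const msg = [
    Nacl.util.encodeBase64(keypair.publicKey),
    Nacl.util.encodeBase64(signature),
    Nacl.util.encodeBase64(block)
];
```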
+*/ +var validateLoginBlock = function (Env, publicKey, signature, block, cb) { // FIXME BLOCKS + // convert the public key to a Uint8Array and validate it + if (typeof(publicKey) !== 'string') { return void cb('E_INVALID_KEY'); } + + var u8_public_key; + try { + u8_public_key = Nacl.util.decodeBase64(publicKey); + } catch (e) { + return void cb('E_INVALID_KEY'); + } + + var u8_signature; + try { + u8_signature = Nacl.util.decodeBase64(signature); + } catch (e) { + Env.Log.error('INVALID_BLOCK_SIGNATURE', e); + return void cb('E_INVALID_SIGNATURE'); + } + + // convert the block to a Uint8Array + var u8_block; + try { + u8_block = Nacl.util.decodeBase64(block); + } catch (e) { + return void cb('E_INVALID_BLOCK'); + } + + // take its hash + var hash = Nacl.hash(u8_block); + + // validate the signature against the hash of the content + var verified = Nacl.sign.detached.verify(hash, u8_signature, u8_public_key); + + // existing authentication ensures that users cannot replay old blocks + + // call back with (err) if unsuccessful + if (!verified) { return void cb("E_COULD_NOT_VERIFY"); } + + return void cb(null, u8_block); +}; + +var createLoginBlockPath = function (Env, publicKey) { // FIXME BLOCKS + // prepare publicKey to be used as a file name + var safeKey = Util.escapeKeyCharacters(publicKey); + + // validate safeKey + if (typeof(safeKey) !== 'string') { + return; + } + + // derive the full path + // /home/cryptpad/cryptpad/block/fg/fg32kefksjdgjkewrjksdfksjdfsdfskdjfsfd + return Path.join(Env.paths.block, safeKey.slice(0, 2), safeKey); +}; + +Block.writeLoginBlock = function (Env, safeKey, msg, cb) { // FIXME BLOCKS + //console.log(msg); + var publicKey = msg[0]; + var signature = msg[1]; + var block = msg[2]; + + validateLoginBlock(Env, publicKey, signature, block, function (e, validatedBlock) { + if (e) { return void cb(e); } + if (!(validatedBlock instanceof Uint8Array)) { return void cb('E_INVALID_BLOCK'); } + + // derive the filepath + var path = createLoginBlockPath(Env, publicKey); + + // make sure the path is valid + if (typeof(path) !== 'string') { + return void cb('E_INVALID_BLOCK_PATH'); + } + + var parsed = Path.parse(path); + if (!parsed || typeof(parsed.dir) !== 'string') { + return void cb("E_INVALID_BLOCK_PATH_2"); + } + + nThen(function (w) { + // make sure the path to the file exists + Fse.mkdirp(parsed.dir, w(function (e) { + if (e) { + w.abort(); + cb(e); + } + })); + }).nThen(function () { + // actually write the block + + // flow is dumb and I need to guard against this which will never happen + /*:: if (typeof(validatedBlock) === 'undefined') { throw new Error('should never happen'); } */ + /*:: if (typeof(path) === 'undefined') { throw new Error('should never happen'); } */ + Fs.writeFile(path, Buffer.from(validatedBlock), { encoding: "binary", }, function (err) { + if (err) { return void cb(err); } + cb(); + }); + }); + }); +}; + +/* + When users write a block, they upload the block, and provide + a signature proving that they deserve to be able to write to + the location determined by the public key. + + When removing a block, there is nothing to upload, but we need + to sign something. Since the signature is considered sensitive + information, we can just sign some constant and use that as proof. 
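Correspondingly, a sketch of the removal proof: the client signs the hash of the agreed-upon constant, so nothing sensitive is uploaded and the server can reuse the same verification path (the keypair is again hypothetical):

```js
const Nacl = require("tweetnacl/nacl-fast");
const keypair = Nacl.sign.keyPair(); // hypothetical: the block owner's keypair

// sign the hash of the shared constant; there is no block to upload
const constant = Nacl.util.decodeUTF8('DELETE_BLOCK');
const proof = Nacl.sign.detached(Nacl.hash(constant), keypair.secretKey);

// sent as [publicKey, signature]; the server reconstructs 'DELETE_BLOCK'
// itself and runs the usual hash-and-verify check
```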
+ +*/ +Block.removeLoginBlock = function (Env, safeKey, msg, cb) { // FIXME BLOCKS + var publicKey = msg[0]; + var signature = msg[1]; + var block = Nacl.util.decodeUTF8('DELETE_BLOCK'); // clients and the server will have to agree on this constant + + validateLoginBlock(Env, publicKey, signature, block, function (e /*::, validatedBlock */) { + if (e) { return void cb(e); } + // derive the filepath + var path = createLoginBlockPath(Env, publicKey); + + // make sure the path is valid + if (typeof(path) !== 'string') { + return void cb('E_INVALID_BLOCK_PATH'); + } + + // FIXME COLDSTORAGE + Fs.unlink(path, function (err) { + Env.Log.info('DELETION_BLOCK_BY_OWNER_RPC', { + publicKey: publicKey, + path: path, + status: err? String(err): 'SUCCESS', + }); + + if (err) { return void cb(err); } + cb(); + }); + }); +}; + diff --git a/lib/commands/channel.js b/lib/commands/channel.js new file mode 100644 index 000000000..88404a9b2 --- /dev/null +++ b/lib/commands/channel.js @@ -0,0 +1,274 @@ +/*jshint esversion: 6 */ +const Channel = module.exports; + +const Util = require("../common-util"); +const nThen = require("nthen"); +const Core = require("./core"); +const Metadata = require("./metadata"); + +Channel.clearOwnedChannel = function (Env, safeKey, channelId, cb, Server) { + if (typeof(channelId) !== 'string' || channelId.length !== 32) { + return cb('INVALID_ARGUMENTS'); + } + var unsafeKey = Util.unescapeKeyCharacters(safeKey); + + Metadata.getMetadata(Env, channelId, function (err, metadata) { + if (err) { return void cb(err); } + if (!Core.hasOwners(metadata)) { return void cb('E_NO_OWNERS'); } + // Confirm that the channel is owned by the user in question + if (!Core.isOwner(metadata, unsafeKey)) { + return void cb('INSUFFICIENT_PERMISSIONS'); + } + return void Env.msgStore.clearChannel(channelId, function (e) { + if (e) { return void cb(e); } + cb(); + + const channel_cache = Env.historyKeeper.channel_cache; + + const clear = function () { + // delete the channel cache because it will have been invalidated + delete channel_cache[channelId]; + }; + + nThen(function (w) { + Server.getChannelUserList(channelId).forEach(function (userId) { + Server.send(userId, [ + 0, + Env.historyKeeper.id, + 'MSG', + userId, + JSON.stringify({ + error: 'ECLEARED', + channel: channelId + }) + ], w()); + }); + }).nThen(function () { + clear(); + }).orTimeout(function () { + Env.Log.warn("ON_CHANNEL_CLEARED_TIMEOUT", channelId); + clear(); + }, 30000); + }); + }); +}; + +Channel.removeOwnedChannel = function (Env, safeKey, channelId, cb, Server) { + if (typeof(channelId) !== 'string' || !Core.isValidId(channelId)) { + return cb('INVALID_ARGUMENTS'); + } + var unsafeKey = Util.unescapeKeyCharacters(safeKey); + + if (Env.blobStore.isFileId(channelId)) { + var blobId = channelId; + + return void nThen(function (w) { + // check if you have permissions + Env.blobStore.isOwnedBy(safeKey, blobId, w(function (err, owned) { + if (err || !owned) { + w.abort(); + return void cb("INSUFFICIENT_PERMISSIONS"); + } + })); + }).nThen(function (w) { + // remove the blob + return void Env.blobStore.archive.blob(blobId, w(function (err) { + Env.Log.info('ARCHIVAL_OWNED_FILE_BY_OWNER_RPC', { + safeKey: safeKey, + blobId: blobId, + status: err? 
String(err): 'SUCCESS', + }); + if (err) { + w.abort(); + return void cb(err); + } + })); + }).nThen(function () { + // archive the proof + return void Env.blobStore.archive.proof(safeKey, blobId, function (err) { + Env.Log.info("ARCHIVAL_PROOF_REMOVAL_BY_OWNER_RPC", { + safeKey: safeKey, + blobId: blobId, + status: err? String(err): 'SUCCESS', + }); + if (err) { + return void cb("E_PROOF_REMOVAL"); + } + cb(void 0, 'OK'); + }); + }); + } + + Metadata.getMetadata(Env, channelId, function (err, metadata) { + if (err) { return void cb(err); } + if (!Core.hasOwners(metadata)) { return void cb('E_NO_OWNERS'); } + if (!Core.isOwner(metadata, unsafeKey)) { + return void cb('INSUFFICIENT_PERMISSIONS'); + } + // temporarily archive the file + return void Env.msgStore.archiveChannel(channelId, function (e) { + Env.Log.info('ARCHIVAL_CHANNEL_BY_OWNER_RPC', { + unsafeKey: unsafeKey, + channelId: channelId, + status: e? String(e): 'SUCCESS', + }); + if (e) { + return void cb(e); + } + cb(void 0, 'OK'); + + const channel_cache = Env.historyKeeper.channel_cache; + const metadata_cache = Env.historyKeeper.metadata_cache; + + const clear = function () { + delete channel_cache[channelId]; + Server.clearChannel(channelId); + delete metadata_cache[channelId]; + }; + + // an owner of a channel deleted it + nThen(function (w) { + // close the channel in the store + Env.msgStore.closeChannel(channelId, w()); + }).nThen(function (w) { + // Server.channelBroadcast would be better + // but we can't trust it to track even one callback, + // let alone many in parallel. + // so we simulate it on this side to avoid race conditions + Server.getChannelUserList(channelId).forEach(function (userId) { + Server.send(userId, [ + 0, + Env.historyKeeper.id, + "MSG", + userId, + JSON.stringify({ + error: 'EDELETED', + channel: channelId, + }) + ], w()); + }); + }).nThen(function () { + // clear the channel's data from memory + // once you've sent everyone a notice that the channel has been deleted + clear(); + }).orTimeout(function () { + Env.Log.warn('ON_CHANNEL_DELETED_TIMEOUT', channelId); + clear(); + }, 30000); + }); + }); +}; + +Channel.trimHistory = function (Env, safeKey, data, cb) { + if (!(data && typeof(data.channel) === 'string' && typeof(data.hash) === 'string' && data.hash.length === 64)) { + return void cb('INVALID_ARGS'); + } + + var channelId = data.channel; + var unsafeKey = Util.unescapeKeyCharacters(safeKey); + var hash = data.hash; + + nThen(function (w) { + Metadata.getMetadata(Env, channelId, w(function (err, metadata) { + if (err) { return void cb(err); } + if (!Core.hasOwners(metadata)) { + w.abort(); + return void cb('E_NO_OWNERS'); + } + if (!Core.isOwner(metadata, unsafeKey)) { + w.abort(); + return void cb("INSUFFICIENT_PERMISSIONS"); + } + // else fall through to the next block + })); + }).nThen(function () { + Env.msgStore.trimChannel(channelId, hash, function (err) { + if (err) { return void cb(err); } + // clear historyKeeper's cache for this channel + Env.historyKeeper.channelClose(channelId); + cb(void 0, 'OK'); + delete Env.historyKeeper.channel_cache[channelId]; + delete Env.historyKeeper.metadata_cache[channelId]; + }); + }); +}; + +var ARRAY_LINE = /^\[/; + +/* Files can contain metadata but not content + call back with true if the channel log has no content other than metadata + otherwise false +*/ +Channel.isNewChannel = function (Env, channel, cb) { + if (!Core.isValidId(channel)) { return void cb('INVALID_CHAN'); } + if (channel.length !== 32) { return void cb('INVALID_CHAN'); } + + 
var done = false;
+    Env.msgStore.getMessages(channel, function (msg) {
+        if (done) { return; }
+        try {
+            // metadata lines are JSON objects (starting with '{') while content
+            // lines are JSON arrays (starting with '['), so any match here means
+            // the channel contains more than just metadata
+            if (typeof(msg) === 'string' && ARRAY_LINE.test(msg)) {
+                done = true;
+                return void cb(void 0, false);
+            }
+        } catch (e) {
+            Env.WARN('invalid message read from store', e);
+        }
+    }, function () {
+        if (done) { return; }
+        // no more messages...
+        cb(void 0, true);
+    });
+};
+
+/*  writePrivateMessage
+    allows users to anonymously send a message to the channel
+    prevents their netflux-id from being stored in history
+    and from being broadcast to anyone that might currently be in the channel
+
+    Otherwise behaves the same as sending to a channel
+*/
+Channel.writePrivateMessage = function (Env, args, cb, Server) {
+    var channelId = args[0];
+    var msg = args[1];
+
+    // don't bother handling empty messages
+    if (!msg) { return void cb("INVALID_MESSAGE"); }
+
+    // don't support anything except regular channels
+    if (!Core.isValidId(channelId) || channelId.length !== 32) {
+        return void cb("INVALID_CHAN");
+    }
+
+    // We expect a modern netflux-websocket-server instance
+    // if this API isn't here everything will fall apart anyway
+    if (!(Server && typeof(Server.send) === 'function')) {
+        return void cb("NOT_IMPLEMENTED");
+    }
+
+    // historyKeeper expects something with an 'id' attribute
+    // it will fail unless you provide it, but it doesn't need anything else
+    var channelStruct = {
+        id: channelId,
+    };
+
+    // construct a message to store and broadcast
+    var fullMessage = [
+        0, // the sequence number, as in other server-sent frames (see the Server.send calls elsewhere)
+        null, // normally the netflux id; null isn't rejected, and it distinguishes messages written in this way
+        "MSG", // indicate that this is a MSG
+        channelId, // channel id
+        msg // the actual message content. Generally a string
+    ];
+
+    // historyKeeper already knows how to handle metadata and message validation, so we just pass it off here
+    // if the message isn't valid it won't be stored.
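+    // (illustrative) for a channel id "abc...def", the frame stored and
+    // broadcast below is [0, null, "MSG", "abc...def", "<message content>"],
+    // i.e. exactly the 'fullMessage' constructed above, with no sender id.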
+ Env.historyKeeper.channelMessage(Server, channelStruct, fullMessage); + + Server.getChannelUserList(channelId).forEach(function (userId) { + Server.send(userId, fullMessage); + }); + + cb(); +}; + diff --git a/lib/commands/core.js b/lib/commands/core.js new file mode 100644 index 000000000..d7add69b4 --- /dev/null +++ b/lib/commands/core.js @@ -0,0 +1,190 @@ +/*jshint esversion: 6 */ +/* globals process */ +const Core = module.exports; +const Util = require("../common-util"); +const escapeKeyCharacters = Util.escapeKeyCharacters; + +/* Use Nacl for checking signatures of messages */ +const Nacl = require("tweetnacl/nacl-fast"); + + +Core.DEFAULT_LIMIT = 50 * 1024 * 1024; +Core.SESSION_EXPIRATION_TIME = 60 * 1000; + +Core.isValidId = function (chan) { + return chan && chan.length && /^[a-zA-Z0-9=+-]*$/.test(chan) && + [32, 48].indexOf(chan.length) > -1; +}; + +var makeToken = Core.makeToken = function () { + return Number(Math.floor(Math.random() * Number.MAX_SAFE_INTEGER)) + .toString(16); +}; + +Core.makeCookie = function (token) { + var time = (+new Date()); + time -= time % 5000; + + return [ + time, + process.pid, + token + ]; +}; + +var parseCookie = function (cookie) { + if (!(cookie && cookie.split)) { return null; } + + var parts = cookie.split('|'); + if (parts.length !== 3) { return null; } + + var c = {}; + c.time = new Date(parts[0]); + c.pid = Number(parts[1]); + c.seq = parts[2]; + return c; +}; + +Core.getSession = function (Sessions, key) { + var safeKey = escapeKeyCharacters(key); + if (Sessions[safeKey]) { + Sessions[safeKey].atime = +new Date(); + return Sessions[safeKey]; + } + var user = Sessions[safeKey] = {}; + user.atime = +new Date(); + user.tokens = [ + makeToken() + ]; + return user; +}; + +Core.expireSession = function (Sessions, safeKey) { + var session = Sessions[safeKey]; + if (!session) { return; } + if (session.blobstage) { + session.blobstage.close(); + } + delete Sessions[safeKey]; +}; + +Core.expireSessionAsync = function (Env, safeKey, cb) { + setTimeout(function () { + Core.expireSession(Env.Sessions, safeKey); + cb(void 0, 'OK'); + }); +}; + +var isTooOld = function (time, now) { + return (now - time) > 300000; +}; + +Core.expireSessions = function (Sessions) { + var now = +new Date(); + Object.keys(Sessions).forEach(function (safeKey) { + var session = Sessions[safeKey]; + if (session && isTooOld(session.atime, now)) { + Core.expireSession(Sessions, safeKey); + } + }); +}; + +var addTokenForKey = function (Sessions, publicKey, token) { + if (!Sessions[publicKey]) { throw new Error('undefined user'); } + + var user = Core.getSession(Sessions, publicKey); + user.tokens.push(token); + user.atime = +new Date(); + if (user.tokens.length > 2) { user.tokens.shift(); } +}; + +Core.isValidCookie = function (Sessions, publicKey, cookie) { + var parsed = parseCookie(cookie); + if (!parsed) { return false; } + + var now = +new Date(); + + if (!parsed.time) { return false; } + if (isTooOld(parsed.time, now)) { + return false; + } + + // different process. 
try harder + if (process.pid !== parsed.pid) { + return false; + } + + var user = Core.getSession(Sessions, publicKey); + if (!user) { return false; } + + var idx = user.tokens.indexOf(parsed.seq); + if (idx === -1) { return false; } + + if (idx > 0) { + // make a new token + addTokenForKey(Sessions, publicKey, Core.makeToken()); + } + + return true; +}; + +Core.checkSignature = function (Env, signedMsg, signature, publicKey) { + if (!(signedMsg && publicKey)) { return false; } + + var signedBuffer; + var pubBuffer; + var signatureBuffer; + + try { + signedBuffer = Nacl.util.decodeUTF8(signedMsg); + } catch (e) { + Env.Log.error('INVALID_SIGNED_BUFFER', signedMsg); + return null; + } + + try { + pubBuffer = Nacl.util.decodeBase64(publicKey); + } catch (e) { + return false; + } + + try { + signatureBuffer = Nacl.util.decodeBase64(signature); + } catch (e) { + return false; + } + + if (pubBuffer.length !== 32) { + Env.Log.error('PUBLIC_KEY_LENGTH', publicKey); + return false; + } + + if (signatureBuffer.length !== 64) { + return false; + } + + return Nacl.sign.detached.verify(signedBuffer, signatureBuffer, pubBuffer); +}; + +// E_NO_OWNERS +Core.hasOwners = function (metadata) { + return Boolean(metadata && Array.isArray(metadata.owners)); +}; + +Core.hasPendingOwners = function (metadata) { + return Boolean(metadata && Array.isArray(metadata.pending_owners)); +}; + +// INSUFFICIENT_PERMISSIONS +Core.isOwner = function (metadata, unsafeKey) { + return metadata.owners.indexOf(unsafeKey) !== -1; +}; + +Core.isPendingOwner = function (metadata, unsafeKey) { + return metadata.pending_owners.indexOf(unsafeKey) !== -1; +}; + +Core.haveACookie = function (Env, safeKey, cb) { + cb(); +}; + diff --git a/lib/commands/metadata.js b/lib/commands/metadata.js new file mode 100644 index 000000000..41aea9888 --- /dev/null +++ b/lib/commands/metadata.js @@ -0,0 +1,128 @@ +/*jshint esversion: 6 */ +const Data = module.exports; + +const Meta = require("../metadata"); +const BatchRead = require("../batch-read"); +const WriteQueue = require("../write-queue"); +const Core = require("./core"); +const Util = require("../common-util"); + +const batchMetadata = BatchRead("GET_METADATA"); +Data.getMetadata = function (Env, channel, cb/* , Server */) { + if (!Core.isValidId(channel)) { return void cb('INVALID_CHAN'); } + if (channel.length !== 32) { return cb("INVALID_CHAN_LENGTH"); } + + // FIXME get metadata from the server cache if it is available + batchMetadata(channel, cb, function (done) { + var ref = {}; + var lineHandler = Meta.createLineHandler(ref, Env.Log.error); + + return void Env.msgStore.readChannelMetadata(channel, lineHandler, function (err) { + if (err) { + // stream errors? 
+                return void done(err);
+            }
+            done(void 0, ref.meta);
+        });
+    });
+};
+
+/*  setMetadata
+    - write a new line to the metadata log if a valid command is provided
+    - data is an object: {
+        channel: channelId,
+        command: metadataCommand (string),
+        value: value
+    }
+*/
+var queueMetadata = WriteQueue();
+Data.setMetadata = function (Env, safeKey, data, cb, Server) {
+    var unsafeKey = Util.unescapeKeyCharacters(safeKey);
+
+    var channel = data.channel;
+    var command = data.command;
+    if (!channel || !Core.isValidId(channel)) { return void cb('INVALID_CHAN'); }
+    if (!command || typeof(command) !== 'string') { return void cb('INVALID_COMMAND'); }
+    if (Meta.commands.indexOf(command) === -1) { return void cb('UNSUPPORTED_COMMAND'); }
+
+    queueMetadata(channel, function (next) {
+        Data.getMetadata(Env, channel, function (err, metadata) {
+            if (err) {
+                cb(err);
+                return void next();
+            }
+            if (!Core.hasOwners(metadata)) {
+                cb('E_NO_OWNERS');
+                return void next();
+            }
+
+            // if you are a pending owner and not an owner
+            //   you can either ADD_OWNERS, or RM_PENDING_OWNERS
+            //   and you should only be able to add yourself as an owner
+            //   everything else should be rejected
+            // else if you are not an owner
+            //   you should be rejected
+            // else write the command
+
+            // Confirm that the channel is owned by the user in question
+            // or the user is accepting a pending ownership offer
+            if (Core.hasPendingOwners(metadata) &&
+                Core.isPendingOwner(metadata, unsafeKey) &&
+                !Core.isOwner(metadata, unsafeKey)) {
+
+                // If you are a pending owner, make sure you can only add yourself as an owner
+                if ((command !== 'ADD_OWNERS' && command !== 'RM_PENDING_OWNERS')
+                        || !Array.isArray(data.value)
+                        || data.value.length !== 1
+                        || data.value[0] !== unsafeKey) {
+                    cb('INSUFFICIENT_PERMISSIONS');
+                    return void next();
+                }
+                // FIXME wacky fallthrough is hard to read
+                // we could pass this off to a writeMetadataCommand function
+                // and make the flow easier to follow
+            } else if (!Core.isOwner(metadata, unsafeKey)) {
+                cb('INSUFFICIENT_PERMISSIONS');
+                return void next();
+            }
+
+            // Add the new metadata line
+            var line = [command, data.value, +new Date()];
+            var changed = false;
+            try {
+                changed = Meta.handleCommand(metadata, line);
+            } catch (e) {
+                cb(e);
+                return void next();
+            }
+
+            // if your command is valid but it didn't result in any change to the metadata,
+            // call back now and don't write any "useless" line to the log
+            if (!changed) {
+                cb(void 0, metadata);
+                return void next();
+            }
+            Env.msgStore.writeMetadata(channel, JSON.stringify(line), function (e) {
+                if (e) {
+                    cb(e);
+                    return void next();
+                }
+
+                cb(void 0, metadata);
+                next();
+
+                const metadata_cache = Env.historyKeeper.metadata_cache;
+                const channel_cache = Env.historyKeeper.channel_cache;
+
+                metadata_cache[channel] = metadata;
+
+                var index = Util.find(channel_cache, [channel, 'index']);
+                if (index && typeof(index) === 'object') { index.metadata = metadata; }
+
+                Server.channelBroadcast(channel, JSON.stringify(metadata), Env.historyKeeper.id);
+            });
+        });
+    });
+};
+
+
diff --git a/lib/commands/pin-rpc.js b/lib/commands/pin-rpc.js
new file mode 100644
index 000000000..e490f713d
--- /dev/null
+++ b/lib/commands/pin-rpc.js
@@ -0,0 +1,464 @@
+/*jshint esversion: 6 */
+const Core = require("./core");
+
+const BatchRead = require("../batch-read");
+const Pins = require("../pins");
+
+const Pinning = module.exports;
+const Nacl = require("tweetnacl/nacl-fast");
+const Util = require("../common-util");
+const nThen = require("nthen");
+const
Saferphore = require("saferphore"); +const Pinned = require('../../scripts/pinned'); + +//const escapeKeyCharacters = Util.escapeKeyCharacters; +const unescapeKeyCharacters = Util.unescapeKeyCharacters; + +var sumChannelSizes = function (sizes) { + return Object.keys(sizes).map(function (id) { return sizes[id]; }) + .filter(function (x) { + // only allow positive numbers + return !(typeof(x) !== 'number' || x <= 0); + }) + .reduce(function (a, b) { return a + b; }, 0); +}; + +// FIXME it's possible for this to respond before the server has had a chance +// to fetch the limits. Maybe we should respond with an error... +// or wait until we actually know the limits before responding +var getLimit = Pinning.getLimit = function (Env, publicKey, cb) { + var unescapedKey = unescapeKeyCharacters(publicKey); + var limit = Env.limits[unescapedKey]; + var defaultLimit = typeof(Env.defaultStorageLimit) === 'number'? + Env.defaultStorageLimit: Core.DEFAULT_LIMIT; + + var toSend = limit && typeof(limit.limit) === "number"? + [limit.limit, limit.plan, limit.note] : [defaultLimit, '', '']; + + cb(void 0, toSend); +}; + +var addPinned = function ( + Env, + publicKey /*:string*/, + channelList /*Array*/, + cb /*:()=>void*/) +{ + Env.evPinnedPadsReady.reg(() => { + channelList.forEach((c) => { + const x = Env.pinnedPads[c] = Env.pinnedPads[c] || {}; + x[publicKey] = 1; + }); + cb(); + }); +}; +var removePinned = function ( + Env, + publicKey /*:string*/, + channelList /*Array*/, + cb /*:()=>void*/) +{ + Env.evPinnedPadsReady.reg(() => { + channelList.forEach((c) => { + const x = Env.pinnedPads[c]; + if (!x) { return; } + delete x[publicKey]; + }); + cb(); + }); +}; + +var getMultipleFileSize = function (Env, channels, cb) { + if (!Array.isArray(channels)) { return cb('INVALID_PIN_LIST'); } + if (typeof(Env.msgStore.getChannelSize) !== 'function') { + return cb('GET_CHANNEL_SIZE_UNSUPPORTED'); + } + + var i = channels.length; + var counts = {}; + + var done = function () { + i--; + if (i === 0) { return cb(void 0, counts); } + }; + + channels.forEach(function (channel) { + Pinning.getFileSize(Env, channel, function (e, size) { + if (e) { + // most likely error here is that a file no longer exists + // but a user still has it in their drive, and wants to know + // its size. We should find a way to inform them of this in + // the future. For now we can just tell them it has no size. + + //WARN('getFileSize', e); + counts[channel] = 0; + return done(); + } + counts[channel] = size; + done(); + }); + }); +}; + +const batchUserPins = BatchRead("LOAD_USER_PINS"); +var loadUserPins = function (Env, publicKey, cb) { + var session = Core.getSession(Env.Sessions, publicKey); + + if (session.channels) { + return cb(session.channels); + } + + batchUserPins(publicKey, cb, function (done) { + var ref = {}; + var lineHandler = Pins.createLineHandler(ref, function (label, data) { + Env.Log.error(label, { + log: publicKey, + data: data, + }); + }); + + // if channels aren't in memory. load them from disk + Env.pinStore.getMessages(publicKey, lineHandler, function () { + // no more messages + + // only put this into the cache if it completes + session.channels = ref.pins; + done(ref.pins); // FIXME no error handling? 
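+            // (for context) the pin log replayed above is line-delimited JSON;
+            // each line has the shape written by pinChannel/unpinChannel/
+            // resetUserPins below, e.g.:
+            //   ["PIN", ["<channelId>", ...], 1584000000000]
+            //   ["UNPIN", ["<channelId>", ...], 1584000000001]
+            //   ["RESET", ["<channelId>", ...], 1584000000002]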
+ }); + }); +}; + +var truthyKeys = function (O) { + return Object.keys(O).filter(function (k) { + return O[k]; + }); +}; + +var getChannelList = Pinning.getChannelList = function (Env, publicKey, _cb) { + var cb = Util.once(Util.mkAsync(_cb)); + loadUserPins(Env, publicKey, function (pins) { + cb(truthyKeys(pins)); + }); +}; + +const batchTotalSize = BatchRead("GET_TOTAL_SIZE"); +Pinning.getTotalSize = function (Env, publicKey, cb) { + var unescapedKey = unescapeKeyCharacters(publicKey); + var limit = Env.limits[unescapedKey]; + + // Get a common key if multiple users share the same quota, otherwise take the public key + var batchKey = (limit && Array.isArray(limit.users)) ? limit.users.join('') : publicKey; + + batchTotalSize(batchKey, cb, function (done) { + var channels = []; + var bytes = 0; + nThen(function (waitFor) { + // Get the channels list for our user account + Pinning.getChannelList(Env, publicKey, waitFor(function (_channels) { + if (!_channels) { + waitFor.abort(); + return done('INVALID_PIN_LIST'); + } + Array.prototype.push.apply(channels, _channels); + })); + // Get the channels list for users sharing our quota + if (limit && Array.isArray(limit.users) && limit.users.length > 1) { + limit.users.forEach(function (key) { + if (key === unescapedKey) { return; } // Don't count ourselves twice + getChannelList(Env, key, waitFor(function (_channels) { + if (!_channels) { return; } // Broken user, don't count their quota + Array.prototype.push.apply(channels, _channels); + })); + }); + } + }).nThen(function (waitFor) { + // Get size of the channels + var list = []; // Contains the channels already counted in the quota to avoid duplicates + channels.forEach(function (channel) { // TODO semaphore? + if (list.indexOf(channel) !== -1) { return; } + list.push(channel); + Pinning.getFileSize(Env, channel, waitFor(function (e, size) { + if (!e) { bytes += size; } + })); + }); + }).nThen(function () { + done(void 0, bytes); + }); + }); +}; + +/* Users should be able to clear their own pin log with an authenticated RPC +*/ +Pinning.removePins = function (Env, safeKey, cb) { + if (typeof(Env.pinStore.removeChannel) !== 'function') { + return void cb("E_NOT_IMPLEMENTED"); + } + Env.pinStore.removeChannel(safeKey, function (err) { + Env.Log.info('DELETION_PIN_BY_OWNER_RPC', { + safeKey: safeKey, + status: err? 
String(err): 'SUCCESS',
+        });
+
+        if (err) { return void cb(err); }
+        cb(void 0, 'OK');
+    });
+};
+
+Pinning.trimPins = function (Env, safeKey, cb) {
+    cb("NOT_IMPLEMENTED");
+};
+
+var getFreeSpace = Pinning.getFreeSpace = function (Env, publicKey, cb) {
+    getLimit(Env, publicKey, function (e, limit) {
+        if (e) { return void cb(e); }
+        Pinning.getTotalSize(Env, publicKey, function (e, size) {
+            if (typeof(size) === 'undefined') { return void cb(e); }
+
+            var rem = limit[0] - size;
+            if (typeof(rem) !== 'number') {
+                return void cb('invalid_response');
+            }
+            cb(void 0, rem);
+        });
+    });
+};
+
+var hashChannelList = function (A) {
+    var uniques = [];
+
+    A.forEach(function (a) {
+        if (uniques.indexOf(a) === -1) { uniques.push(a); }
+    });
+    uniques.sort();
+
+    var hash = Nacl.util.encodeBase64(Nacl.hash(Nacl
+        .util.decodeUTF8(JSON.stringify(uniques))));
+
+    return hash;
+};
+
+var getHash = Pinning.getHash = function (Env, publicKey, cb) {
+    getChannelList(Env, publicKey, function (channels) {
+        cb(void 0, hashChannelList(channels));
+    });
+};
+
+Pinning.pinChannel = function (Env, publicKey, channels, cb) {
+    // expected an array
+    if (!(channels && channels.filter)) {
+        return void cb('INVALID_PIN_LIST');
+    }
+
+    // get channel list ensures your session has a cached channel list
+    getChannelList(Env, publicKey, function (pinned) {
+        var session = Core.getSession(Env.Sessions, publicKey);
+
+        // only pin channels which are not already pinned
+        var toStore = channels.filter(function (channel) {
+            return pinned.indexOf(channel) === -1;
+        });
+
+        if (toStore.length === 0) {
+            return void getHash(Env, publicKey, cb);
+        }
+
+        getMultipleFileSize(Env, toStore, function (e, sizes) {
+            if (typeof(sizes) === 'undefined') { return void cb(e); }
+            var pinSize = sumChannelSizes(sizes);
+
+            getFreeSpace(Env, publicKey, function (e, free) {
+                if (typeof(free) === 'undefined') {
+                    Env.WARN('getFreeSpace', e);
+                    return void cb(e);
+                }
+                if (pinSize > free) { return void cb('E_OVER_LIMIT'); }
+
+                Env.pinStore.message(publicKey, JSON.stringify(['PIN', toStore, +new Date()]),
+                    function (e) {
+                    if (e) { return void cb(e); }
+                    toStore.forEach(function (channel) {
+                        session.channels[channel] = true;
+                    });
+                    addPinned(Env, publicKey, toStore, () => {});
+                    getHash(Env, publicKey, cb);
+                });
+            });
+        });
+    });
+};
+
+Pinning.unpinChannel = function (Env, publicKey, channels, cb) {
+    // expected an array
+    if (!(channels && channels.filter)) {
+        return void cb('INVALID_PIN_LIST');
+    }
+
+    getChannelList(Env, publicKey, function (pinned) {
+        var session = Core.getSession(Env.Sessions, publicKey);
+
+        // only unpin channels which are pinned
+        var toStore = channels.filter(function (channel) {
+            return pinned.indexOf(channel) !== -1;
+        });
+
+        if (toStore.length === 0) {
+            return void getHash(Env, publicKey, cb);
+        }
+
+        Env.pinStore.message(publicKey, JSON.stringify(['UNPIN', toStore, +new Date()]),
+            function (e) {
+            if (e) { return void cb(e); }
+            toStore.forEach(function (channel) {
+                delete session.channels[channel];
+            });
+            removePinned(Env, publicKey, toStore, () => {});
+            getHash(Env, publicKey, cb);
+        });
+    });
+};
+
+Pinning.resetUserPins = function (Env, publicKey, channelList, cb) {
+    if (!Array.isArray(channelList)) { return void cb('INVALID_PIN_LIST'); }
+    var session = Core.getSession(Env.Sessions, publicKey);
+
+    if (!channelList.length) {
+        return void getHash(Env, publicKey, function (e, hash) {
+            if (e) { return cb(e); }
+            cb(void 0, hash);
+        });
+    }
+
+    var pins = {};
+    getMultipleFileSize(Env, channelList, function (e, sizes)
{ + if (typeof(sizes) === 'undefined') { return void cb(e); } + var pinSize = sumChannelSizes(sizes); + + + getLimit(Env, publicKey, function (e, limit) { + if (e) { + Env.WARN('[RESET_ERR]', e); + return void cb(e); + } + + /* we want to let people pin, even if they are over their limit, + but they should only be able to do this once. + + This prevents data loss in the case that someone registers, but + does not have enough free space to pin their migrated data. + + They will not be able to pin additional pads until they upgrade + or delete enough files to go back under their limit. */ + if (pinSize > limit[0] && session.hasPinned) { return void(cb('E_OVER_LIMIT')); } + Env.pinStore.message(publicKey, JSON.stringify(['RESET', channelList, +new Date()]), + function (e) { + if (e) { return void cb(e); } + channelList.forEach(function (channel) { + pins[channel] = true; + }); + + var oldChannels; + if (session.channels && typeof(session.channels) === 'object') { + oldChannels = Object.keys(session.channels); + } else { + oldChannels = []; + } + removePinned(Env, publicKey, oldChannels, () => { + addPinned(Env, publicKey, channelList, ()=>{}); + }); + + // update in-memory cache IFF the reset was allowed. + session.channels = pins; + getHash(Env, publicKey, function (e, hash) { + cb(e, hash); + }); + }); + }); + }); +}; + +Pinning.getFileSize = function (Env, channel, _cb) { + var cb = Util.once(Util.mkAsync(_cb)); + if (!Core.isValidId(channel)) { return void cb('INVALID_CHAN'); } + if (channel.length === 32) { + if (typeof(Env.msgStore.getChannelSize) !== 'function') { + return cb('GET_CHANNEL_SIZE_UNSUPPORTED'); + } + + return void Env.msgStore.getChannelSize(channel, function (e, size /*:number*/) { + if (e) { + if (e.code === 'ENOENT') { return void cb(void 0, 0); } + return void cb(e.code); + } + cb(void 0, size); + }); + } + + // 'channel' refers to a file, so you need another API + Env.blobStore.size(channel, function (e, size) { + if (typeof(size) === 'undefined') { return void cb(e); } + cb(void 0, size); + }); +}; + +/* accepts a list, and returns a sublist of channel or file ids which seem + to have been deleted from the server (file size 0) + + we might consider that we should only say a file is gone if fs.stat returns + ENOENT, but for now it's simplest to just rely on getFileSize... +*/ +Pinning.getDeletedPads = function (Env, channels, cb) { + if (!Array.isArray(channels)) { return cb('INVALID_LIST'); } + var L = channels.length; + + var sem = Saferphore.create(10); + var absentees = []; + + var job = function (channel, wait) { + return function (give) { + Pinning.getFileSize(Env, channel, wait(give(function (e, size) { + if (e) { return; } + if (size === 0) { absentees.push(channel); } + }))); + }; + }; + + nThen(function (w) { + for (var i = 0; i < L; i++) { + sem.take(job(channels[i], w)); + } + }).nThen(function () { + cb(void 0, absentees); + }); +}; + +// inform that the +Pinning.loadChannelPins = function (Env) { + Pinned.load(function (err, data) { + if (err) { + Env.Log.error("LOAD_CHANNEL_PINS", err); + + // FIXME not sure what should be done here instead + Env.pinnedPads = {}; + Env.evPinnedPadsReady.fire(); + return; + } + + + Env.pinnedPads = data; + Env.evPinnedPadsReady.fire(); + }, { + pinPath: Env.paths.pin, + }); +}; + +Pinning.isChannelPinned = function (Env, channel, cb) { + Env.evPinnedPadsReady.reg(() => { + if (Env.pinnedPads[channel] && Object.keys(Env.pinnedPads[channel]).length) { // FIXME 'Object.keys' here is overkill. 
We only need to know that it isn't empty + cb(void 0, true); + } else { + delete Env.pinnedPads[channel]; + cb(void 0, false); + } + }); +}; + + diff --git a/lib/commands/quota.js b/lib/commands/quota.js new file mode 100644 index 000000000..74c4eca44 --- /dev/null +++ b/lib/commands/quota.js @@ -0,0 +1,107 @@ +/*jshint esversion: 6 */ +/* globals Buffer*/ +const Quota = module.exports; + +const Util = require("../common-util"); +const Package = require('../../package.json'); +const Https = require("https"); + +Quota.applyCustomLimits = function (Env) { + var isLimit = function (o) { + var valid = o && typeof(o) === 'object' && + typeof(o.limit) === 'number' && + typeof(o.plan) === 'string' && + typeof(o.note) === 'string'; + return valid; + }; + + // read custom limits from the Environment (taken from config) + var customLimits = (function (custom) { + var limits = {}; + Object.keys(custom).forEach(function (k) { + k.replace(/\/([^\/]+)$/, function (all, safeKey) { + var id = Util.unescapeKeyCharacters(safeKey || ''); + limits[id] = custom[k]; + return ''; + }); + }); + return limits; + }(Env.customLimits || {})); + + Object.keys(customLimits).forEach(function (k) { + if (!isLimit(customLimits[k])) { return; } + Env.limits[k] = customLimits[k]; + }); +}; + +Quota.updateCachedLimits = function (Env, cb) { + if (Env.adminEmail === false) { + Quota.applyCustomLimits(Env); + if (Env.allowSubscriptions === false) { return; } + throw new Error("allowSubscriptions must be false if adminEmail is false"); + } + + var body = JSON.stringify({ + domain: Env.myDomain, + subdomain: Env.mySubdomain || null, + adminEmail: Env.adminEmail, + version: Package.version + }); + var options = { + host: 'accounts.cryptpad.fr', + path: '/api/getauthorized', + method: 'POST', + headers: { + "Content-Type": "application/json", + "Content-Length": Buffer.byteLength(body) + } + }; + + var req = Https.request(options, function (response) { + if (!('' + response.statusCode).match(/^2\d\d$/)) { + return void cb('SERVER ERROR ' + response.statusCode); + } + var str = ''; + + response.on('data', function (chunk) { + str += chunk; + }); + + response.on('end', function () { + try { + var json = JSON.parse(str); + Env.limits = json; + Quota.applyCustomLimits(Env); + cb(void 0); + } catch (e) { + cb(e); + } + }); + }); + + req.on('error', function (e) { + Quota.applyCustomLimits(Env); + // FIXME this is always falsey. Maybe we just suppress errors? 
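+        // (editor's note) the FIXME above refers to 'Env.domain': this module
+        // reads the configured domain from 'Env.myDomain' (see the request body
+        // above), and nothing visible here ever sets 'Env.domain', so this
+        // branch swallows the error and calls back with nothing.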
+        if (!Env.domain) { return cb(); }
+        cb(e);
+    });
+
+    req.end(body);
+};
+
+// The limits object contains storage limits for all the publicKey that have paid
+// To each key is associated an object containing the 'limit' value and a 'note' explaining that limit
Quota.getUpdatedLimit = function (Env, safeKey, cb) { // FIXME BATCH?
+    Quota.updateCachedLimits(Env, function (err) {
+        if (err) { return void cb(err); }
+
+        var limit = Env.limits[safeKey];
+
+        if (limit && typeof(limit.limit) === 'number') {
+            return void cb(void 0, [limit.limit, limit.plan, limit.note]);
+        }
+
+        return void cb(void 0, [Env.defaultStorageLimit, '', '']);
+    });
+};
+
diff --git a/lib/commands/upload.js b/lib/commands/upload.js
new file mode 100644
index 000000000..66868a65d
--- /dev/null
+++ b/lib/commands/upload.js
@@ -0,0 +1,57 @@
+/*jshint esversion: 6 */
+const Upload = module.exports;
+const Util = require("../common-util");
+const Pinning = require("./pin-rpc");
+const nThen = require("nthen");
+const Core = require("./core");
+
+Upload.status = function (Env, safeKey, filesize, _cb) { // FIXME FILES
+    var cb = Util.once(Util.mkAsync(_cb));
+
+    // reject the request unless the provided size is actually a non-negative number
+    if (typeof(filesize) !== 'number' ||
+        filesize < 0) { return void cb('E_INVALID_SIZE'); }
+
+    if (filesize >= Env.maxUploadSize) { return cb('TOO_LARGE'); }
+
+    nThen(function (w) {
+        var abortAndCB = Util.both(w.abort, cb);
+        Env.blobStore.status(safeKey, w(function (err, inProgress) {
+            // if there's an error something is weird
+            if (err) { return void abortAndCB(err); }
+
+            // we cannot upload two things at once
+            if (inProgress) { return void abortAndCB(void 0, true); }
+        }));
+    }).nThen(function () {
+        // if you're here then there are no pending uploads
+        // check if you have space in your quota to upload something of this size
+        Pinning.getFreeSpace(Env, safeKey, function (e, free) {
+            if (e) { return void cb(e); }
+            if (filesize >= free) { return cb('NOT_ENOUGH_SPACE'); }
+
+            var user = Core.getSession(Env.Sessions, safeKey);
+            user.pendingUploadSize = filesize;
+            user.currentUploadSize = 0;
+
+            cb(void 0, false);
+        });
+    });
+};
+
+Upload.upload = function (Env, safeKey, chunk, cb) {
+    Env.blobStore.upload(safeKey, chunk, cb);
+};
+
+Upload.complete = function (Env, safeKey, arg, cb) {
+    Env.blobStore.complete(safeKey, arg, cb);
+};
+
+Upload.cancel = function (Env, safeKey, arg, cb) {
+    Env.blobStore.cancel(safeKey, arg, cb);
+};
+
+Upload.complete_owned = function (Env, safeKey, arg, cb) {
+    Env.blobStore.completeOwned(safeKey, arg, cb);
+};
+
diff --git a/lib/historyKeeper.js b/lib/historyKeeper.js
new file mode 100644
index 000000000..dd80d0b53
--- /dev/null
+++ b/lib/historyKeeper.js
@@ -0,0 +1,97 @@
+/* jshint esversion: 6 */
+
+const nThen = require('nthen');
+const Crypto = require('crypto');
+const WriteQueue = require("./write-queue");
+const BatchRead = require("./batch-read");
+const RPC = require("./rpc");
+const HK = require("./hk-util.js");
+
+module.exports.create = function (config, cb) {
+    const Log = config.log;
+
+    Log.silly('HK_LOADING', 'LOADING HISTORY_KEEPER MODULE');
+
+    // TODO populate Env with everything that you use from config
+    // so that you can stop passing around your raw config
+    // and more easily share state between historyKeeper and rpc
+    const Env = {
+        Log: Log,
+        // tasks
+        // store
+        id: Crypto.randomBytes(8).toString('hex'),
+
+        metadata_cache: {},
+        channel_cache: {},
+        queueStorage: WriteQueue(),
+        batchIndexReads: BatchRead("HK_GET_INDEX"),
+
}; + + config.historyKeeper = { + metadata_cache: Env.metadata_cache, + channel_cache: Env.channel_cache, + + id: Env.id, + + channelMessage: function (Server, channel, msgStruct) { + // netflux-server emits 'channelMessage' events whenever someone broadcasts to a channel + // historyKeeper stores these messages if the channel id indicates that they are + // a channel type with permanent history + HK.onChannelMessage(Env, Server, channel, msgStruct); + }, + channelClose: function (channelName) { + // netflux-server emits 'channelClose' events whenever everyone leaves a channel + // we drop cached metadata and indexes at the same time + HK.dropChannel(Env, channelName); + }, + channelOpen: function (Server, channelName, userId) { + Env.channel_cache[channelName] = {}; + Server.send(userId, [ + 0, + Env.id, + 'JOIN', + channelName + ]); + }, + directMessage: function (Server, seq, userId, json) { + // netflux-server allows you to register an id with a handler + // this handler is invoked every time someone sends a message to that id + HK.onDirectMessage(Env, Server, seq, userId, json); + }, + }; + + Log.verbose('HK_ID', 'History keeper ID: ' + Env.id); + + nThen(function (w) { + require('../storage/file').create(config, w(function (_store) { + config.store = _store; + Env.store = _store; + })); + }).nThen(function (w) { + require("../storage/tasks").create(config, w(function (e, tasks) { + if (e) { + throw e; + } + Env.tasks = tasks; + config.tasks = tasks; + if (config.disableIntegratedTasks) { return; } + + config.intervals = config.intervals || {}; + config.intervals.taskExpiration = setInterval(function () { + tasks.runAll(function (err) { + if (err) { + // either TASK_CONCURRENCY or an error with tasks.list + // in either case it is already logged. + } + }); + }, 1000 * 60 * 5); // run every five minutes + })); + }).nThen(function () { + RPC.create(config, function (err, _rpc) { + if (err) { throw err; } + + Env.rpc = _rpc; + cb(void 0, config.historyKeeper); + }); + }); +}; diff --git a/lib/hk-util.js b/lib/hk-util.js new file mode 100644 index 000000000..69832a544 --- /dev/null +++ b/lib/hk-util.js @@ -0,0 +1,929 @@ +/* jshint esversion: 6 */ +/* global Buffer */ +var HK = module.exports; + +const nThen = require('nthen'); +const Once = require("./once"); +const Meta = require("./metadata"); +const Nacl = require('tweetnacl/nacl-fast'); + +const now = function () { return (new Date()).getTime(); }; +const ONE_DAY = 1000 * 60 * 60 * 24; // one day in milliseconds + +/* getHash + * this function slices off the leading portion of a message which is + most likely unique + * these "hashes" are used to identify particular messages in a channel's history + * clients store "hashes" either in memory or in their drive to query for new messages: + * when reconnecting to a pad + * when connecting to chat or a mailbox + * thus, we can't change this function without invalidating client data which: + * is encrypted clientside + * can't be easily migrated + * don't break it! 
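+    * for example: given a stored content string "cp|sKj3mMPE..." (illustrative),
+        its "hash" is simply the first 64 characters, i.e.
+        getHash(content) === content.slice(0, 64)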
+*/ +const getHash = HK.getHash = function (msg, Log) { + if (typeof(msg) !== 'string') { + if (Log) { + Log.warn('HK_GET_HASH', 'getHash() called on ' + typeof(msg) + ': ' + msg); + } + return ''; + } + return msg.slice(0,64); +}; + +// historyKeeper should explicitly store any channel +// with a 32 character id +const STANDARD_CHANNEL_LENGTH = HK.STANDARD_CHANNEL_LENGTH = 32; + +// historyKeeper should not store messages sent to any channel +// with a 34 character id +const EPHEMERAL_CHANNEL_LENGTH = HK.EPHEMERAL_CHANNEL_LENGTH = 34; + +const tryParse = function (Env, str) { + try { + return JSON.parse(str); + } catch (err) { + Env.Log.error('HK_PARSE_ERROR', err); + } +}; + +/* sliceCpIndex + returns a list of all checkpoints which might be relevant for a client connecting to a session + + * if there are two or fewer checkpoints, return everything you have + * if there are more than two + * return at least two + * plus any more which were received within the last 100 messages + + This is important because the additional history is what prevents + clients from forking on checkpoints and dropping forked history. + +*/ +const sliceCpIndex = function (cpIndex, line) { + // Remove "old" checkpoints (cp sent before 100 messages ago) + const minLine = Math.max(0, (line - 100)); + let start = cpIndex.slice(0, -2); + const end = cpIndex.slice(-2); + start = start.filter(function (obj) { + return obj.line > minLine; + }); + return start.concat(end); +}; + +const isMetadataMessage = function (parsed) { + return Boolean(parsed && parsed.channel); +}; + +// validateKeyStrings supplied by clients must decode to 32-byte Uint8Arrays +const isValidValidateKeyString = function (key) { + try { + return typeof(key) === 'string' && + Nacl.util.decodeBase64(key).length === Nacl.sign.publicKeyLength; + } catch (e) { + return false; + } +}; + +var CHECKPOINT_PATTERN = /^cp\|(([A-Za-z0-9+\/=]+)\|)?/; + +/* expireChannel is here to clean up channels that should have been removed + but for some reason are still present +*/ +const expireChannel = function (Env, channel) { + return void Env.store.archiveChannel(channel, function (err) { + Env.Log.info("ARCHIVAL_CHANNEL_BY_HISTORY_KEEPER_EXPIRATION", { + channelId: channel, + status: err? 
String(err): "SUCCESS", + }); + }); +}; + +/* dropChannel + * cleans up memory structures which are managed entirely by the historyKeeper +*/ +const dropChannel = HK.dropChannel = function (Env, chanName) { + delete Env.metadata_cache[chanName]; + delete Env.channel_cache[chanName]; +}; + +/* checkExpired + * synchronously returns true or undefined to indicate whether the channel is expired + * according to its metadata + * has some side effects: + * closes the channel via the store.closeChannel API + * and then broadcasts to all channel members that the channel has expired + * removes the channel from the netflux-server's in-memory cache + * removes the channel metadata from history keeper's in-memory cache + + FIXME the boolean nature of this API should be separated from its side effects +*/ +const checkExpired = function (Env, Server, channel) { + const store = Env.store; + const metadata_cache = Env.metadata_cache; + + if (!(channel && channel.length === STANDARD_CHANNEL_LENGTH)) { return false; } + let metadata = metadata_cache[channel]; + if (!(metadata && typeof(metadata.expire) === 'number')) { return false; } + + // the number of milliseconds ago the channel should have expired + let pastDue = (+new Date()) - metadata.expire; + + // less than zero means that it hasn't expired yet + if (pastDue < 0) { return false; } + + // if it should have expired more than a day ago... + // there may have been a problem with scheduling tasks + // or the scheduled tasks may not be running + // so trigger a removal from here + if (pastDue >= ONE_DAY) { expireChannel(Env, channel); } + + // close the channel + store.closeChannel(channel, function () { + Server.channelBroadcast(channel, { + error: 'EEXPIRED', + channel: channel + }, Env.id); + dropChannel(channel); + }); + + // return true to indicate that it has expired + return true; +}; + +/* computeIndex + can call back with an error or a computed index which includes: + * cpIndex: + * array including any checkpoints pushed within the last 100 messages + * processed by 'sliceCpIndex(cpIndex, line)' + * offsetByHash: + * a map containing message offsets by their hash + * this is for every message in history, so it could be very large... + * except we remove offsets from the map if they occur before the oldest relevant checkpoint + * size: in bytes + * metadata: + * validationKey + * expiration time + * owners + * ??? 
(anything else we might add in the future) + * line + * the number of messages in history + * including the initial metadata line, if it exists + +*/ +const computeIndex = function (Env, channelName, cb) { + const store = Env.store; + const Log = Env.Log; + + const cpIndex = []; + let messageBuf = []; + let metadata; + let i = 0; + + const ref = {}; + + const CB = Once(cb); + + const offsetByHash = {}; + let size = 0; + nThen(function (w) { + // iterate over all messages in the channel log + // old channels can contain metadata as the first message of the log + // remember metadata the first time you encounter it + // otherwise index important messages in the log + store.readMessagesBin(channelName, 0, (msgObj, readMore) => { + let msg; + // keep an eye out for the metadata line if you haven't already seen it + // but only check for metadata on the first line + if (!i && !metadata && msgObj.buff.indexOf('{') === 0) { + i++; // always increment the message counter + msg = tryParse(Env, msgObj.buff.toString('utf8')); + if (typeof msg === "undefined") { return readMore(); } + + // validate that the current line really is metadata before storing it as such + if (isMetadataMessage(msg)) { + metadata = msg; + return readMore(); + } + } + i++; + if (msgObj.buff.indexOf('cp|') > -1) { + msg = msg || tryParse(Env, msgObj.buff.toString('utf8')); + if (typeof msg === "undefined") { return readMore(); } + // cache the offsets of checkpoints if they can be parsed + if (msg[2] === 'MSG' && msg[4].indexOf('cp|') === 0) { + cpIndex.push({ + offset: msgObj.offset, + line: i + }); + // we only want to store messages since the latest checkpoint + // so clear the buffer every time you see a new one + messageBuf = []; + } + } + // if it's not metadata or a checkpoint then it should be a regular message + // store it in the buffer + messageBuf.push(msgObj); + return readMore(); + }, w((err) => { + if (err && err.code !== 'ENOENT') { + w.abort(); + return void CB(err); + } + + // once indexing is complete you should have a buffer of messages since the latest checkpoint + // map the 'hash' of each message to its byte offset in the log, to be used for reconnecting clients + messageBuf.forEach((msgObj) => { + const msg = tryParse(Env, msgObj.buff.toString('utf8')); + if (typeof msg === "undefined") { return; } + if (msg[0] === 0 && msg[2] === 'MSG' && typeof(msg[4]) === 'string') { + // msgObj.offset is API guaranteed by our storage module + // it should always be a valid positive integer + offsetByHash[getHash(msg[4], Log)] = msgObj.offset; + } + // There is a trailing \n at the end of the file + size = msgObj.offset + msgObj.buff.length + 1; + }); + })); + }).nThen(function (w) { + // create a function which will iterate over amendments to the metadata + const handler = Meta.createLineHandler(ref, Log.error); + + // initialize the accumulator in case there was a foundational metadata line in the log content + if (metadata) { handler(void 0, metadata); } + + // iterate over the dedicated metadata log (if it exists) + // proceed even in the event of a stream error on the metadata log + store.readDedicatedMetadata(channelName, handler, w(function (err) { + if (err) { + return void Log.error("DEDICATED_METADATA_ERROR", err); + } + })); + }).nThen(function () { + // when all is done, cache the metadata in memory + if (ref.index) { // but don't bother if no metadata was found... 
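+            // (editor's note) 'ref' was populated by Meta.createLineHandler above;
+            // 'ref.meta' holds the accumulated metadata object, and a truthy
+            // 'ref.index' indicates that the handler consumed at least one line,
+            // which is how this branch detects that metadata was actually found.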
+ metadata = Env.metadata_cache[channelName] = ref.meta; + } + // and return the computed index + CB(null, { + // Only keep the checkpoints included in the last 100 messages + cpIndex: sliceCpIndex(cpIndex, i), + offsetByHash: offsetByHash, + size: size, + metadata: metadata, + line: i + }); + }); +}; + +/* getIndex + calls back with an error if anything goes wrong + or with a cached index for a channel if it exists + (along with metadata) + otherwise it calls back with the index computed by 'computeIndex' + + as an added bonus: + if the channel exists but its index does not then it caches the index +*/ +const getIndex = (Env, channelName, cb) => { + const channel_cache = Env.channel_cache; + + const chan = channel_cache[channelName]; + + // if there is a channel in memory and it has an index cached, return it + if (chan && chan.index) { + // enforce async behaviour + return void setTimeout(function () { + cb(undefined, chan.index); + }); + } + + Env.batchIndexReads(channelName, cb, function (done) { + computeIndex(Env, channelName, (err, ret) => { + // this is most likely an unrecoverable filesystem error + if (err) { return void done(err); } + // cache the computed result if possible + if (chan) { chan.index = ret; } + // return + done(void 0, ret); + }); + }); +}; + +/* storeMessage + * channel id + * the message to store + * whether the message is a checkpoint + * optionally the hash of the message + * it's not always used, but we guard against it + + + * async but doesn't have a callback + * source of a race condition whereby: + * two messaages can be inserted + * two offsets can be computed using the total size of all the messages + * but the offsets don't correspond to the actual location of the newlines + * because the two actions were performed like ABba... + * the fix is to use callbacks and implement queueing for writes + * to guarantee that offset computation is always atomic with writes +*/ +const storeMessage = function (Env, channel, msg, isCp, optionalMessageHash) { + const id = channel.id; + const Log = Env.Log; + + Env.queueStorage(id, function (next) { + const msgBin = Buffer.from(msg + '\n', 'utf8'); + // Store the message first, and update the index only once it's stored. 
+ // store.messageBin can be async so updating the index first may + // result in a wrong cpIndex + nThen((waitFor) => { + Env.store.messageBin(id, msgBin, waitFor(function (err) { + if (err) { + waitFor.abort(); + Log.error("HK_STORE_MESSAGE_ERROR", err.message); + + // this error is critical, but there's not much we can do at the moment + // proceed with more messages, but they'll probably fail too + // at least you won't have a memory leak + + // TODO make it possible to respond to clients with errors so they know + // their message wasn't stored + return void next(); + } + })); + }).nThen((waitFor) => { + getIndex(Env, id, waitFor((err, index) => { + if (err) { + Log.warn("HK_STORE_MESSAGE_INDEX", err.stack); + // non-critical, we'll be able to get the channel index later + return void next(); + } + if (typeof (index.line) === "number") { index.line++; } + if (isCp) { + index.cpIndex = sliceCpIndex(index.cpIndex, index.line || 0); + for (let k in index.offsetByHash) { + if (index.offsetByHash[k] < index.cpIndex[0]) { + delete index.offsetByHash[k]; + } + } + index.cpIndex.push({ + offset: index.size, + line: ((index.line || 0) + 1) + }); + } + if (optionalMessageHash) { index.offsetByHash[optionalMessageHash] = index.size; } + index.size += msgBin.length; + + // handle the next element in the queue + next(); + })); + }); + }); +}; + + +/* getHistoryOffset + returns a number representing the byte offset from the start of the log + for whatever history you're seeking. + + query by providing a 'lastKnownHash', + which is really just a string of the first 64 characters of an encrypted message. + OR by -1 which indicates that we want the full history (byte offset 0) + OR nothing, which indicates that you want whatever messages the historyKeeper deems relevant + (typically the last few checkpoints) + + this function embeds a lot of the history keeper's logic: + + 0. if you passed -1 as the lastKnownHash it means you want the complete history + * I'm not sure why you'd need to call this function if you know it will return 0 in this case... + * it has a side-effect of filling the index cache if it's empty + 1. if you provided a lastKnownHash and that message does not exist in the history: + * either the client has made a mistake or the history they knew about no longer exists + * call back with EINVAL + 2. if you did not provide a lastKnownHash + * and there are fewer than two checkpoints: + * return 0 (read from the start of the file) + * and there are two or more checkpoints: + * return the offset of the earliest checkpoint which 'sliceCpIndex' considers relevant + 3. if you did provide a lastKnownHash + * read through the log until you find the hash that you're looking for + * call back with either the byte offset of the message that you found OR + * -1 if you didn't find it + +*/ +const getHistoryOffset = (Env, channelName, lastKnownHash, cb) => { + const store = Env.store; + const Log = Env.Log; + + // lastKnownhash === -1 means we want the complete history + if (lastKnownHash === -1) { return void cb(null, 0); } + let offset = -1; + nThen((waitFor) => { + getIndex(Env, channelName, waitFor((err, index) => { + if (err) { waitFor.abort(); return void cb(err); } + + // check if the "hash" the client is requesting exists in the index + const lkh = index.offsetByHash[lastKnownHash]; + // we evict old hashes from the index as new checkpoints are discovered. + // if someone connects and asks for a hash that is no longer relevant, + // we tell them it's an invalid request. 
This is because of the semantics of "GET_HISTORY" + // which is only ever used when connecting or reconnecting in typical uses of history... + // this assumption should hold for uses by chainpad, but perhaps not for other uses cases. + // EXCEPT: other cases don't use checkpoints! + // clients that are told that their request is invalid should just make another request + // without specifying the hash, and just trust the server to give them the relevant data. + // QUESTION: does this mean mailboxes are causing the server to store too much stuff in memory? + if (lastKnownHash && typeof(lkh) !== "number") { + waitFor.abort(); + return void cb(new Error('EINVAL')); + } + + // Since last 2 checkpoints + if (!lastKnownHash) { + waitFor.abort(); + // Less than 2 checkpoints in the history: return everything + if (index.cpIndex.length < 2) { return void cb(null, 0); } + // Otherwise return the second last checkpoint's index + return void cb(null, index.cpIndex[0].offset); + /* LATER... + in practice, two checkpoints can be very close together + we have measures to avoid duplicate checkpoints, but editors + can produce nearby checkpoints which are slightly different, + and slip past these protections. To be really careful, we can + seek past nearby checkpoints by some number of patches so as + to ensure that all editors have sufficient knowledge of history + to reconcile their differences. */ + } + + offset = lkh; + })); + }).nThen((waitFor) => { + // if offset is less than zero then presumably the channel has no messages + // returning falls through to the next block and therefore returns -1 + if (offset !== -1) { return; } + + // do a lookup from the index + // FIXME maybe we don't need this anymore? + // otherwise we have a non-negative offset and we can start to read from there + store.readMessagesBin(channelName, 0, (msgObj, readMore, abort) => { + // tryParse return a parsed message or undefined + const msg = tryParse(Env, msgObj.buff.toString('utf8')); + // if it was undefined then go onto the next message + if (typeof msg === "undefined") { return readMore(); } + if (typeof(msg[4]) !== 'string' || lastKnownHash !== getHash(msg[4], Log)) { + return void readMore(); + } + offset = msgObj.offset; + abort(); + }, waitFor(function (err) { + if (err) { waitFor.abort(); return void cb(err); } + })); + }).nThen(() => { + cb(null, offset); + }); +}; + +/* getHistoryAsync + * finds the appropriate byte offset from which to begin reading using 'getHistoryOffset' + * streams through the rest of the messages, safely parsing them and returning the parsed content to the handler + * calls back when it has reached the end of the log + + Used by: + * GET_HISTORY + +*/ +const getHistoryAsync = (Env, channelName, lastKnownHash, beforeHash, handler, cb) => { + const store = Env.store; + + let offset = -1; + nThen((waitFor) => { + getHistoryOffset(Env, channelName, lastKnownHash, waitFor((err, os) => { + if (err) { + waitFor.abort(); + return void cb(err); + } + offset = os; + })); + }).nThen((waitFor) => { + if (offset === -1) { return void cb(new Error("could not find offset")); } + const start = (beforeHash) ? 
0 : offset; + store.readMessagesBin(channelName, start, (msgObj, readMore, abort) => { + if (beforeHash && msgObj.offset >= offset) { return void abort(); } + var parsed = tryParse(Env, msgObj.buff.toString('utf8')); + if (!parsed) { return void readMore(); } + handler(parsed, readMore); + }, waitFor(function (err) { + return void cb(err); + })); + }); +}; + +/* getOlderHistory + * allows clients to query for all messages until a known hash is read + * stores all messages in history as they are read + * can therefore be very expensive for memory + * should probably be converted to a streaming interface + + Used by: + * GET_HISTORY_RANGE +*/ +const getOlderHistory = function (Env, channelName, oldestKnownHash, cb) { + const store = Env.store; + const Log = Env.Log; + var messageBuffer = []; + var found = false; + store.getMessages(channelName, function (msgStr) { + if (found) { return; } + + let parsed = tryParse(Env, msgStr); + if (typeof parsed === "undefined") { return; } + + // identify classic metadata messages by their inclusion of a channel. + // and don't send metadata, since: + // 1. the user won't be interested in it + // 2. this metadata is potentially incomplete/incorrect + if (isMetadataMessage(parsed)) { return; } + + var content = parsed[4]; + if (typeof(content) !== 'string') { return; } + + var hash = getHash(content, Log); + if (hash === oldestKnownHash) { + found = true; + } + messageBuffer.push(parsed); + }, function (err) { + if (err) { + Log.error("HK_GET_OLDER_HISTORY", err); + } + cb(messageBuffer); + }); +}; + +const handleRPC = function (Env, Server, seq, userId, parsed) { + const HISTORY_KEEPER_ID = Env.id; + + /* RPC Calls... */ + var rpc_call = parsed.slice(1); + + Server.send(userId, [seq, 'ACK']); + try { + // slice off the sequence number and pass in the rest of the message + Env.rpc(Server, rpc_call, function (err, output) { + if (err) { + Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify([parsed[0], 'ERROR', err])]); + return; + } + Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify([parsed[0]].concat(output))]); + }); + } catch (e) { + // if anything throws in the middle, send an error + Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify([parsed[0], 'ERROR', 'SERVER_ERROR'])]); + } +}; + +const handleGetHistory = function (Env, Server, seq, userId, parsed) { + const store = Env.store; + const tasks = Env.tasks; + const metadata_cache = Env.metadata_cache; + const channel_cache = Env.channel_cache; + const HISTORY_KEEPER_ID = Env.id; + const Log = Env.Log; + + // parsed[1] is the channel id + // parsed[2] is a validation key or an object containing metadata (optionnal) + // parsed[3] is the last known hash (optionnal) + + Server.send(userId, [seq, 'ACK']); + var channelName = parsed[1]; + var config = parsed[2]; + var metadata = {}; + var lastKnownHash; + + // clients can optionally pass a map of attributes + // if the channel already exists this map will be ignored + // otherwise it will be stored as the initial metadata state for the channel + if (config && typeof config === "object" && !Array.isArray(parsed[2])) { + lastKnownHash = config.lastKnownHash; + metadata = config.metadata || {}; + if (metadata.expire) { + metadata.expire = +metadata.expire * 1000 + (+new Date()); + } + } + metadata.channel = channelName; + metadata.created = +new Date(); + + // if the user sends us an invalid key, we won't be able to validate their messages + // so they'll never get written to the log anyway. 
+
+const handleGetHistory = function (Env, Server, seq, userId, parsed) {
+    const store = Env.store;
+    const tasks = Env.tasks;
+    const metadata_cache = Env.metadata_cache;
+    const channel_cache = Env.channel_cache;
+    const HISTORY_KEEPER_ID = Env.id;
+    const Log = Env.Log;
+
+    // parsed[1] is the channel id
+    // parsed[2] is a validation key or an object containing metadata (optional)
+    // parsed[3] is the last known hash (optional)
+
+    Server.send(userId, [seq, 'ACK']);
+    var channelName = parsed[1];
+    var config = parsed[2];
+    var metadata = {};
+    var lastKnownHash;
+
+    // clients can optionally pass a map of attributes
+    // if the channel already exists this map will be ignored
+    // otherwise it will be stored as the initial metadata state for the channel
+    if (config && typeof config === "object" && !Array.isArray(parsed[2])) {
+        lastKnownHash = config.lastKnownHash;
+        metadata = config.metadata || {};
+        if (metadata.expire) {
+            metadata.expire = +metadata.expire * 1000 + (+new Date());
+        }
+    }
+    metadata.channel = channelName;
+    metadata.created = +new Date();
+
+    // if the user sends us an invalid key, we won't be able to validate their messages
+    // so they'll never get written to the log anyway. Let's just drop their message
+    // on the floor instead of doing a bunch of extra work
+    // TODO send them an error message so they know something is wrong
+    if (metadata.validateKey && !isValidValidateKeyString(metadata.validateKey)) {
+        return void Log.error('HK_INVALID_KEY', metadata.validateKey);
+    }
+
+    nThen(function (waitFor) {
+        var w = waitFor();
+
+        /*  unless this is a young channel, we will serve all messages from an offset.
+            this will not include the channel metadata, so we need to explicitly fetch that.
+            unfortunately, we can't just serve it blindly, since then young channels will
+            send the metadata twice, so let's do a quick check of what we're going to serve...
+        */
+        getIndex(Env, channelName, waitFor((err, index) => {
+            /*  if there's an error here, it should be encountered
+                and handled by the next nThen block.
+                so, let's just fall through...
+            */
+            if (err) { return w(); }
+
+            // it's possible that the channel doesn't have metadata
+            // but in that case there's no point in checking if the channel expired
+            // or in trying to send metadata, so just skip this block
+            if (!index || !index.metadata) { return void w(); }
+            // and then check if the channel is expired. If it is, send the error and abort
+            // FIXME this is hard to read because 'checkExpired' has side effects
+            if (checkExpired(Env, Server, channelName)) { return void waitFor.abort(); }
+            // always send metadata with GET_HISTORY requests
+            Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(index.metadata)], w);
+        }));
+    }).nThen(() => {
+        let msgCount = 0;
+
+        // TODO compute lastKnownHash in a manner such that it will always skip past the metadata line?
+        getHistoryAsync(Env, channelName, lastKnownHash, false, (msg, readMore) => {
+            msgCount++;
+            // avoid sending the metadata message a second time
+            if (isMetadataMessage(msg) && metadata_cache[channelName]) { return readMore(); }
+            Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(msg)], readMore);
+        }, (err) => {
+            if (err && err.code !== 'ENOENT') {
+                if (err.message !== 'EINVAL') { Log.error("HK_GET_HISTORY", err); }
+                const parsedMsg = {error: err.message, channel: channelName};
+                Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(parsedMsg)]);
+                return;
+            }
+
+            const chan = channel_cache[channelName];
+
+            if (msgCount === 0 && !metadata_cache[channelName] && Server.channelContainsUser(channelName, userId)) {
+                metadata_cache[channelName] = metadata;
+
+                // the index will have already been constructed and cached at this point
+                // but it will not have detected any metadata because it hasn't been written yet
+                // this means that the cache starts off as invalid, so we have to correct it
+                if (chan && chan.index) { chan.index.metadata = metadata; }
+
+                // new channels will always have their metadata written to a dedicated metadata log
+                // but any lines after the first which are not amendments in a particular format will be ignored.
+                // Thus we should be safe from race conditions here if we just write metadata to the log as below...
+                // TODO validate this logic
+                // otherwise maybe we need to check that the metadata log is empty as well
+                store.writeMetadata(channelName, JSON.stringify(metadata), function (err) {
+                    if (err) {
+                        // FIXME tell the user that there was a channel error?
+                        return void Log.error('HK_WRITE_METADATA', {
+                            channel: channelName,
+                            error: err,
+                        });
+                    }
+                });
+
+                // write tasks
+                if (metadata.expire && typeof(metadata.expire) === 'number') {
+                    // the fun part...
+                    // the user has said they want this pad to expire at some point
+                    tasks.write(metadata.expire, "EXPIRE", [ channelName ], function (err) {
+                        if (err) {
+                            // if there is an error, we don't want to crash the whole server...
+                            // just log it, and if there's a problem you'll be able to fix it
+                            // at a later date with the provided information
+                            Log.error('HK_CREATE_EXPIRE_TASK', err);
+                            Log.info('HK_INVALID_EXPIRE_TASK', JSON.stringify([metadata.expire, 'EXPIRE', channelName]));
+                        }
+                    });
+                }
+                Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(metadata)]);
+            }
+
+            // End of history message:
+            let parsedMsg = {state: 1, channel: channelName};
+
+            Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(parsedMsg)]);
+        });
+    });
+};
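+
+/*  Editor's sketch of a typical GET_HISTORY exchange (field values are
+    illustrative; see the parsing at the top of handleGetHistory):
+
+        client -> server:  ['GET_HISTORY', channelName, {
+            lastKnownHash: '...',                // optional resume point
+            metadata: { validateKey: '...' },    // only used if the channel is new
+        }]
+        server -> client:  [seq, 'ACK']
+        server -> client:  one 'MSG' with the channel metadata, when available
+        server -> client:  one 'MSG' per stored message from the chosen offset
+        server -> client:  'MSG' with {state: 1, channel: channelName}   // end marker
+*/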
+
+const handleGetHistoryRange = function (Env, Server, seq, userId, parsed) {
+    var channelName = parsed[1];
+    var map = parsed[2];
+    const HISTORY_KEEPER_ID = Env.id;
+
+    if (!(map && typeof(map) === 'object')) {
+        return void Server.send(userId, [seq, 'ERROR', 'INVALID_ARGS', HISTORY_KEEPER_ID]);
+    }
+
+    var oldestKnownHash = map.from;
+    var desiredMessages = map.count;
+    var desiredCheckpoint = map.cpCount;
+    var txid = map.txid;
+    if (typeof(desiredMessages) !== 'number' && typeof(desiredCheckpoint) !== 'number') {
+        return void Server.send(userId, [seq, 'ERROR', 'UNSPECIFIED_COUNT', HISTORY_KEEPER_ID]);
+    }
+
+    if (!txid) {
+        return void Server.send(userId, [seq, 'ERROR', 'NO_TXID', HISTORY_KEEPER_ID]);
+    }
+
+    Server.send(userId, [seq, 'ACK']);
+    return void getOlderHistory(Env, channelName, oldestKnownHash, function (messages) {
+        var toSend = [];
+        if (typeof(desiredMessages) === "number") {
+            toSend = messages.slice(-desiredMessages);
+        } else {
+            let cpCount = 0;
+            for (var i = messages.length - 1; i >= 0; i--) {
+                if (/^cp\|/.test(messages[i][4]) && i !== (messages.length - 1)) {
+                    cpCount++;
+                }
+                toSend.unshift(messages[i]);
+                if (cpCount >= desiredCheckpoint) { break; }
+            }
+        }
+        toSend.forEach(function (msg) {
+            Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId,
+                JSON.stringify(['HISTORY_RANGE', txid, msg])]);
+        });
+
+        Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId,
+            JSON.stringify(['HISTORY_RANGE_END', txid, channelName])
+        ]);
+    });
+};
+
+const handleGetFullHistory = function (Env, Server, seq, userId, parsed) {
+    const HISTORY_KEEPER_ID = Env.id;
+    const Log = Env.Log;
+
+    // parsed[1] is the channel id
+    // parsed[2] is a validation key (optional)
+    // parsed[3] is the last known hash (optional)
+
+    Server.send(userId, [seq, 'ACK']);
+
+    // FIXME should we send metadata here too?
+    // none of the clientside code which uses this API needs metadata, but it won't hurt to send it (2019-08-22)
+    return void getHistoryAsync(Env, parsed[1], -1, false, (msg, readMore) => {
+        Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(['FULL_HISTORY', msg])], readMore);
+    }, (err) => {
+        let parsedMsg = ['FULL_HISTORY_END', parsed[1]];
+        if (err) {
+            Log.error('HK_GET_FULL_HISTORY', err.stack);
+            parsedMsg = ['ERROR', parsed[1], err.message];
+        }
+        Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(parsedMsg)]);
+    });
+};
+
+const directMessageCommands = {
+    GET_HISTORY: handleGetHistory,
+    GET_HISTORY_RANGE: handleGetHistoryRange,
+    GET_FULL_HISTORY: handleGetFullHistory,
+};
+
+/*  onDirectMessage
+    * exported for use by the netflux-server
+    * parses and handles all direct messages directed to the history keeper
+    * checks whether the channel is expired and executes the associated side effects
+    * routes queries to the appropriate handlers
+*/
+HK.onDirectMessage = function (Env, Server, seq, userId, json) {
+    const Log = Env.Log;
+    Log.silly('HK_MESSAGE', json);
+
+    let parsed;
+    try {
+        parsed = JSON.parse(json[2]);
+    } catch (err) {
+        Log.error("HK_PARSE_CLIENT_MESSAGE", json);
+        return;
+    }
+
+    // If the requested history is for an expired channel, abort
+    // Note that if we don't have the keys for that channel in metadata_cache, we'll
+    // have to abort later (once we know the expiration time)
+    if (checkExpired(Env, Server, parsed[1])) { return; }
+
+    // look up the appropriate command in the map of commands or fall back to RPC
+    var command = directMessageCommands[parsed[0]] || handleRPC;
+
+    // run the command with the standard function signature
+    command(Env, Server, seq, userId, parsed);
+};
+
+/*  onChannelMessage
+    Determines what we should store when a message is broadcast to a channel
+
+    * ignores ephemeral channels
+    * ignores messages sent to expired channels
+    * rejects duplicated checkpoints
+    * validates messages to channels that have validation keys
+    * caches the id of the last saved checkpoint
+    * adds timestamps to incoming messages
+    * writes messages to the store
+*/
+HK.onChannelMessage = function (Env, Server, channel, msgStruct) {
+    const Log = Env.Log;
+
+    // TODO our usage of 'channel' here looks prone to errors
+    // we only use it for its 'id', but it can contain other stuff
+    // also, we're calling this from both the RPC module and the netflux-server
+    // we should probably just change this to expect a channel id directly
+
+    // don't store messages if the channel id indicates that it's an ephemeral message
+    if (!channel.id || channel.id.length === EPHEMERAL_CHANNEL_LENGTH) { return; }
+
+    const isCp = /^cp\|/.test(msgStruct[4]);
+    let id;
+    if (isCp) {
+        // id becomes either null or an array of results...
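+        // (editor's note, illustrative: for a checkpoint message such as
+        //  'cp|<cpId>|[...]', 'id[2]' below would capture '<cpId>'; the exact
+        //  capture groups depend on CHECKPOINT_PATTERN's definition earlier in this file)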
+ id = CHECKPOINT_PATTERN.exec(msgStruct[4]); + if (Array.isArray(id) && id[2] && id[2] === channel.lastSavedCp) { + // Reject duplicate checkpoints + return; + } + } + + let metadata; + nThen(function (w) { + // getIndex (and therefore the latest metadata) + getIndex(Env, channel.id, w(function (err, index) { + if (err) { + w.abort(); + return void Log.error('CHANNEL_MESSAGE_ERROR', err); + } + + if (!index.metadata) { + // if there's no channel metadata then it can't be an expiring channel + // nor can we possibly validate it + return; + } + + metadata = index.metadata; + + // don't write messages to expired channels + if (checkExpired(Env, Server, channel)) { return void w.abort(); } + + // if there's no validateKey present skip to the next block + if (!metadata.validateKey) { return; } + + // trim the checkpoint indicator off the message if it's present + let signedMsg = (isCp) ? msgStruct[4].replace(CHECKPOINT_PATTERN, '') : msgStruct[4]; + // convert the message from a base64 string into a Uint8Array + + // FIXME this can fail and the client won't notice + signedMsg = Nacl.util.decodeBase64(signedMsg); + + // FIXME this can blow up + // TODO check that that won't cause any problems other than not being able to append... + const validateKey = Nacl.util.decodeBase64(metadata.validateKey); + // validate the message + const validated = Nacl.sign.open(signedMsg, validateKey); + if (!validated) { + // don't go any further if the message fails validation + w.abort(); + Log.info("HK_SIGNED_MESSAGE_REJECTED", 'Channel '+channel.id); + return; + } + })); + }).nThen(function () { + // do checkpoint stuff... + + // 1. get the checkpoint id + // 2. reject duplicate checkpoints + + if (isCp) { + // if the message is a checkpoint we will have already validated + // that it isn't a duplicate. remember its id so that we can + // repeat this process for the next incoming checkpoint + + // WARNING: the fact that we only check the most recent checkpoints + // is a potential source of bugs if one editor has high latency and + // pushes a duplicate of an earlier checkpoint than the latest which + // has been pushed by editors with low latency + // FIXME + if (Array.isArray(id) && id[2]) { + // Store new checkpoint hash + channel.lastSavedCp = id[2]; + } + } + + // add the time to the message + msgStruct.push(now()); + + // storeMessage + storeMessage(Env, channel, JSON.stringify(msgStruct), isCp, getHash(msgStruct[4], Log)); + }); +}; + + diff --git a/lib/metadata.js b/lib/metadata.js index de40043af..2b3a0b737 100644 --- a/lib/metadata.js +++ b/lib/metadata.js @@ -211,12 +211,14 @@ Meta.createLineHandler = function (ref, errorHandler) { line: JSON.stringify(line), }); } + + // the case above is special, everything else should increment the index + var index = ref.index++; if (typeof(line) === 'undefined') { return; } if (Array.isArray(line)) { try { handleCommand(ref.meta, line); - ref.index++; } catch (err2) { errorHandler("METADATA_COMMAND_ERR", { error: err2.stack, @@ -226,8 +228,15 @@ Meta.createLineHandler = function (ref, errorHandler) { return; } - if (ref.index === 0 && typeof(line) === 'object') { - ref.index++; + // the first line of a channel is processed before the dedicated metadata log. + // it can contain a map, in which case it should be used as the initial state. 
+ // it's possible that a trim-history command was interrupted, in which case + // this first message might exist in parallel with the more recent metadata log + // which will contain the computed state of the previous metadata log + // which has since been archived. + // Thus, accept both the first and second lines you process as valid initial state + // preferring the second if it exists + if (index < 2 && line && typeof(line) === 'object') { // special case! ref.meta = line; return; @@ -235,7 +244,7 @@ Meta.createLineHandler = function (ref, errorHandler) { errorHandler("METADATA_HANDLER_WEIRDLINE", { line: line, - index: ref.index++, + index: index, }); }; }; diff --git a/lib/rpc.js b/lib/rpc.js new file mode 100644 index 000000000..23597eb5b --- /dev/null +++ b/lib/rpc.js @@ -0,0 +1,306 @@ +/*jshint esversion: 6 */ +const nThen = require("nthen"); + +const Util = require("./common-util"); +const mkEvent = Util.mkEvent; + +const Core = require("./commands/core"); +const Admin = require("./commands/admin-rpc"); +const Pinning = require("./commands/pin-rpc"); +const Quota = require("./commands/quota"); +const Block = require("./commands/block"); +const Metadata = require("./commands/metadata"); +const Channel = require("./commands/channel"); +const Upload = require("./commands/upload"); + +var RPC = module.exports; + +const Store = require("../storage/file"); +const BlobStore = require("../storage/blob"); + +const UNAUTHENTICATED_CALLS = { + GET_FILE_SIZE: Pinning.getFileSize, + GET_MULTIPLE_FILE_SIZE: Pinning.getMultipleFileSize, + GET_DELETED_PADS: Pinning.getDeletedPads, + IS_CHANNEL_PINNED: Pinning.isChannelPinned, + IS_NEW_CHANNEL: Channel.isNewChannel, + WRITE_PRIVATE_MESSAGE: Channel.writePrivateMessage, + GET_METADATA: Metadata.getMetadata, +}; + +var isUnauthenticateMessage = function (msg) { + return msg && msg.length === 2 && typeof(UNAUTHENTICATED_CALLS[msg[0]]) === 'function'; +}; + +var handleUnauthenticatedMessage = function (Env, msg, respond, Server) { + Env.Log.silly('LOG_RPC', msg[0]); + + var method = UNAUTHENTICATED_CALLS[msg[0]]; + method(Env, msg[1], function (err, value) { + if (err) { + Env.WARN(err, msg[1]); + return void respond(err); + } + respond(err, [null, value, null]); + }, Server); +}; + +const AUTHENTICATED_USER_TARGETED = { + RESET: Pinning.resetUserPins, + PIN: Pinning.pinChannel, + UNPIN: Pinning.unpinChannel, + CLEAR_OWNED_CHANNEL: Channel.clearOwnedChannel, + REMOVE_OWNED_CHANNEL: Channel.removeOwnedChannel, + TRIM_HISTORY: Channel.trimHistory, + UPLOAD_STATUS: Upload.status, + UPLOAD: Upload.upload, + UPLOAD_COMPLETE: Upload.complete, + UPLOAD_CANCEL: Upload.cancel, + OWNED_UPLOAD_COMPLETE: Upload.complete_owned, + WRITE_LOGIN_BLOCK: Block.writeLoginBlock, + REMOVE_LOGIN_BLOCK: Block.removeLoginBlock, + ADMIN: Admin.command, + SET_METADATA: Metadata.setMetadata, +}; + +const AUTHENTICATED_USER_SCOPED = { + GET_HASH: Pinning.getHash, + GET_TOTAL_SIZE: Pinning.getTotalSize, + UPDATE_LIMITS: Quota.getUpdatedLimit, + GET_LIMIT: Pinning.getLimit, + EXPIRE_SESSION: Core.expireSessionAsync, + REMOVE_PINS: Pinning.removePins, + TRIM_PINS: Pinning.trimPins, + COOKIE: Core.haveACookie, +}; + +var isAuthenticatedCall = function (call) { + if (call === 'UPLOAD') { return false; } + return typeof(AUTHENTICATED_USER_TARGETED[call] || AUTHENTICATED_USER_SCOPED[call]) === 'function'; +}; + +var handleAuthenticatedMessage = function (Env, unsafeKey, msg, respond, Server) { + /* If you have gotten this far, you have signed the message with the + public key which 
you provided.
+    */
+
+    var safeKey = Util.escapeKeyCharacters(unsafeKey);
+
+    var Respond = function (e, value) {
+        var session = Env.Sessions[safeKey];
+        var token = session? session.tokens.slice(-1)[0]: '';
+        var cookie = Core.makeCookie(token).join('|');
+        respond(e? String(e): e, [cookie].concat(typeof(value) !== 'undefined'? value: []));
+    };
+
+    // discard the validated cookie from the message
+    msg.shift();
+    if (!msg.length) {
+        return void Respond('INVALID_MSG');
+    }
+
+    var TYPE = msg[0];
+
+    Env.Log.silly('LOG_RPC', TYPE);
+
+    if (typeof(AUTHENTICATED_USER_TARGETED[TYPE]) === 'function') {
+        return void AUTHENTICATED_USER_TARGETED[TYPE](Env, safeKey, msg[1], function (e, value) {
+            Env.WARN(e, value);
+            return void Respond(e, value);
+        }, Server);
+    }
+
+    if (typeof(AUTHENTICATED_USER_SCOPED[TYPE]) === 'function') {
+        return void AUTHENTICATED_USER_SCOPED[TYPE](Env, safeKey, function (e, value) {
+            if (e) {
+                Env.WARN(e, safeKey);
+                return void Respond(e);
+            }
+            Respond(e, value);
+        });
+    }
+
+    return void Respond('UNSUPPORTED_RPC_CALL', msg);
+};
+
+var rpc = function (Env, Server, data, respond) {
+    if (!Array.isArray(data)) {
+        Env.Log.debug('INVALID_ARG_FORMAT', data);
+        return void respond('INVALID_ARG_FORMAT');
+    }
+
+    if (!data.length) {
+        return void respond("INSUFFICIENT_ARGS");
+    } else if (data.length !== 1) {
+        Env.Log.debug('UNEXPECTED_ARGUMENTS_LENGTH', data);
+    }
+
+    var msg = data[0].slice(0);
+
+    if (!Array.isArray(msg)) {
+        return void respond('INVALID_ARG_FORMAT');
+    }
+
+    if (isUnauthenticateMessage(msg)) {
+        return handleUnauthenticatedMessage(Env, msg, respond, Server);
+    }
+
+    var signature = msg.shift();
+    var publicKey = msg.shift();
+
+    // make sure a user object is initialized in the cookie jar
+    if (publicKey) {
+        Core.getSession(Env.Sessions, publicKey);
+    } else {
+        Env.Log.debug("NO_PUBLIC_KEY_PROVIDED", publicKey);
+    }
+
+    var cookie = msg[0];
+    if (!Core.isValidCookie(Env.Sessions, publicKey, cookie)) {
+        // no cookie is fine if the RPC is to get a cookie
+        if (msg[1] !== 'COOKIE') {
+            return void respond('NO_COOKIE');
+        }
+    }
+
+    var serialized = JSON.stringify(msg);
+
+    if (!(serialized && typeof(publicKey) === 'string')) {
+        return void respond('INVALID_MESSAGE_OR_PUBLIC_KEY');
+    }
+
+    var command = msg[1];
+
+    if (command === 'UPLOAD') {
+        // UPLOAD is a special case that skips signature validation
+        // intentional fallthrough behaviour
+        return void handleAuthenticatedMessage(Env, publicKey, msg, respond, Server);
+    }
+    if (isAuthenticatedCall(command)) {
+        // check the signature on the message
+        // refuse the command if it doesn't validate
+        if (Core.checkSignature(Env, serialized, signature, publicKey) === true) {
+            return void handleAuthenticatedMessage(Env, publicKey, msg, respond, Server);
+        }
+        return void respond("INVALID_SIGNATURE_OR_PUBLIC_KEY");
+    }
+    Env.Log.warn('INVALID_RPC_CALL', command);
+    return void respond("INVALID_RPC_CALL");
+};
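+
+/*  Editor's note (shape inferred from the parsing above; the command and its
+    arguments are illustrative): an authenticated call arrives as
+
+        data = [[ signature, publicKey, cookie, 'PIN', [channelId] ]]
+
+    and the signature is checked against JSON.stringify(msg) after the first
+    two elements are shifted off, i.e. against [cookie, 'PIN', [channelId]],
+    which binds each signed call to a fresh session cookie. */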
+
+RPC.create = function (config, cb) {
+    var Log = config.log;
+
+    // load pin-store...
+    Log.silly('LOADING RPC MODULE');
+
+    var keyOrDefaultString = function (key, def) {
+        return typeof(config[key]) === 'string'? config[key]: def;
+    };
+
+    var WARN = function (e, output) {
+        if (e && output) {
+            Log.warn(e, {
+                output: output,
+                message: String(e),
+                stack: new Error(e).stack,
+            });
+        }
+    };
+
+    if (typeof(config.domain) !== 'undefined') {
+        throw new Error("'domain' is not a supported configuration value");
+    }
+
+    var Env = {
+        historyKeeper: config.historyKeeper,
+        intervals: config.intervals || {},
+        maxUploadSize: config.maxUploadSize || (20 * 1024 * 1024),
+        Sessions: {},
+        paths: {},
+        msgStore: config.store,
+        pinStore: undefined,
+        pinnedPads: {},
+        evPinnedPadsReady: mkEvent(true),
+        limits: {},
+        admins: [],
+        Log: Log,
+        WARN: WARN,
+        flushCache: config.flushCache,
+        adminEmail: config.adminEmail,
+        allowSubscriptions: config.allowSubscriptions,
+        myDomain: config.myDomain,
+        mySubdomain: config.mySubdomain,
+        customLimits: config.customLimits,
+        // FIXME this attribute isn't in the default conf
+        // but it is referenced in Quota
+        domain: config.domain
+    };
+
+    Env.defaultStorageLimit = typeof(config.defaultStorageLimit) === 'number' && config.defaultStorageLimit > 0?
+        config.defaultStorageLimit:
+        Core.DEFAULT_LIMIT;
+
+    try {
+        Env.admins = (config.adminKeys || []).map(function (k) {
+            k = k.replace(/\/+$/, '');
+            var s = k.split('/');
+            return s[s.length-1];
+        });
+    } catch (e) {
+        console.error("Can't parse admin keys. Please update or fix your config.js file!");
+    }
+
+    var Sessions = Env.Sessions;
+    var paths = Env.paths;
+    var pinPath = paths.pin = keyOrDefaultString('pinPath', './pins');
+    paths.block = keyOrDefaultString('blockPath', './block');
+    paths.data = keyOrDefaultString('filePath', './datastore');
+    paths.staging = keyOrDefaultString('blobStagingPath', './blobstage');
+    paths.blob = keyOrDefaultString('blobPath', './blob');
+
+    var updateLimitDaily = function () {
+        Quota.updateCachedLimits(Env, function (e) {
+            if (e) {
+                WARN('limitUpdate', e);
+            }
+        });
+    };
+    Quota.applyCustomLimits(Env);
+    updateLimitDaily();
+    Env.intervals.dailyLimitUpdate = setInterval(updateLimitDaily, 24*3600*1000);
+
+    Pinning.loadChannelPins(Env);
+
+    nThen(function (w) {
+        Store.create({
+            filePath: pinPath,
+        }, w(function (s) {
+            Env.pinStore = s;
+        }));
+        BlobStore.create({
+            blobPath: config.blobPath,
+            blobStagingPath: config.blobStagingPath,
+            archivePath: config.archivePath,
+            getSession: function (safeKey) {
+                return Core.getSession(Sessions, safeKey);
+            },
+        }, w(function (err, blob) {
+            if (err) { throw new Error(err); }
+            Env.blobStore = blob;
+        }));
+    }).nThen(function () {
+        cb(void 0, function (Server, data, respond) {
+            try {
+                return rpc(Env, Server, data, respond);
+            } catch (e) {
+                console.log("Error from RPC with data " + JSON.stringify(data));
+                console.log(e.stack);
+            }
+        });
+        // expire old sessions once per minute
+        Env.intervals.sessionExpirationInterval = setInterval(function () {
+            Core.expireSessions(Sessions);
+        }, Core.SESSION_EXPIRATION_TIME);
+    });
+};
diff --git a/lib/schedule.js b/lib/schedule.js
new file mode 100644
index 000000000..1fdef8cce
--- /dev/null
+++ b/lib/schedule.js
@@ -0,0 +1,172 @@
+var WriteQueue = require("./write-queue");
+var Util = require("./common-util");
+
+/*  This module implements a FIFO scheduler
+    which assumes the existence of three types of async tasks:
+
+    1. ordered tasks which must be executed sequentially
+    2. unordered tasks which can be executed in parallel
+    3. blocking tasks which must block the execution of all other tasks
+
+    The scheduler assumes there will be many resources identified by strings,
+    and that the constraints described above will only apply in the context
+    of identical string ids.
+
+    Many blocking tasks may be executed in parallel so long as they
+    concern resources identified by different ids.
+
+USAGE:
+
+    const schedule = require("./schedule")();
+
+    // schedule two sequential tasks using the resource 'pewpew'
+    schedule.ordered('pewpew', function (next) {
+        appendToFile('beep\n', next);
+    });
+    schedule.ordered('pewpew', function (next) {
+        appendToFile('boop\n', next);
+    });
+
+    // schedule a task that can happen whenever
+    schedule.unordered('pewpew', function (next) {
+        displayFileSize(next);
+    });
+
+    // schedule a blocking task which will wait
+    // until all the unordered tasks have completed before commencing
+    schedule.blocking('pewpew', function (next) {
+        deleteFile(next);
+    });
+
+    // this will be queued for after the blocking task
+    schedule.ordered('pewpew', function (next) {
+        appendToFile('boom', next);
+    });
+
+*/
+
+// return a uid which is not already in a map
+var unusedUid = function (set) {
+    var uid = Util.uid();
+    // pass 'set' along when retrying, otherwise a collision would throw
+    if (set[uid]) { return unusedUid(set); }
+    return uid;
+};
+
+// return an existing session, creating one if it does not already exist
+var lookup = function (map, id) {
+    return (map[id] = map[id] || {
+        // remember the id so that garbage collection can find this session
+        id: id,
+        //blocking: [],
+        active: {},
+        blocked: {},
+    });
+};
+
+var isEmpty = function (map) {
+    for (var key in map) {
+        if (map.hasOwnProperty(key)) { return false; }
+    }
+    return true;
+};
+
+module.exports = function () {
+    // every scheduler instance has its own queue
+    var queue = WriteQueue();
+
+    // ordered tasks don't require any extra logic
+    var Ordered = function (id, task) {
+        queue(id, task);
+    };
+
+    // unordered and blocking tasks need a little extra state
+    var map = {};
+
+    // regular garbage collection keeps memory consumption low
+    var collectGarbage = function (id) {
+        // avoid using 'lookup' since it creates a session implicitly
+        var local = map[id];
+        // bail out if no session
+        if (!local) { return; }
+        // bail out if there are blocking or active tasks
+        if (local.lock) { return; }
+        if (!isEmpty(local.active)) { return; }
+        // if there are no pending actions then delete the session
+        delete map[id];
+    };
+
+    // unordered tasks run immediately if there are no blocking tasks scheduled
+    // or immediately after blocking tasks finish
+    var runImmediately = function (local, task) {
+        // set a flag in the map of active unordered tasks
+        // to prevent blocking tasks from running until you finish
+        var uid = unusedUid(local.active);
+        local.active[uid] = true;
+
+        task(function () {
+            // remove the flag you set to indicate that your task completed
+            delete local.active[uid];
+            // don't do anything if other unordered tasks are still running
+            if (!isEmpty(local.active)) { return; }
+            // bail out if there are no blocking tasks scheduled or ready
+            if (typeof(local.waiting) !== 'function') {
+                return void collectGarbage(local.id);
+            }
+            setTimeout(local.waiting);
+        });
+    };
+
+    var runOnceUnblocked = function (local, task) {
+        var uid = unusedUid(local.blocked);
+        local.blocked[uid] = function (next) {
+            // remove yourself from the set of parked tasks and forward the
+            // completion callback so that the caller can track when you finish
+            delete local.blocked[uid];
+            task(next);
+        };
+    };
+
+    // 'unordered' tasks are scheduled to run after the most recently received blocking task
+    // or immediately and in parallel if there are no blocking tasks scheduled.
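+    //
+    // editor's illustration (not from the original source): given
+    //     schedule.blocking('id', A);
+    //     schedule.unordered('id', B);   // B arrives while A holds the lock
+    // B is parked in 'local.blocked' by runOnceUnblocked above, and only runs
+    // (via runImmediately) once A completes and 'runBlocked' flushes the queue.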
+ var Unordered = function (id, task) { + var local = lookup(map, id); + if (local.lock) { return runOnceUnblocked(local, task); } + runImmediately(local, task); + }; + + var runBlocked = function (local) { + for (var task in local.blocked) { + runImmediately(local, local.blocked[task]); + } + }; + + // 'blocking' tasks must be run alone. + // They are queued alongside ordered tasks, + // and wait until any running 'unordered' tasks complete before commencing. + var Blocking = function (id, task) { + var local = lookup(map, id); + + queue(id, function (next) { + // start right away if there are no running unordered tasks + if (isEmpty(local.active)) { + local.lock = true; + return void task(function () { + delete local.lock; + runBlocked(local); + next(); + }); + } + // otherwise wait until the running tasks have completed + local.waiting = function () { + local.lock = true; + task(function () { + delete local.lock; + delete local.waiting; + runBlocked(local); + next(); + }); + }; + }); + }; + + return { + ordered: Ordered, + unordered: Unordered, + blocking: Blocking, + }; +}; diff --git a/package-lock.json b/package-lock.json index eb4668a33..d4a119d75 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,6 +1,6 @@ { "name": "cryptpad", - "version": "3.10.0", + "version": "3.11.0", "lockfileVersion": 1, "requires": true, "dependencies": { @@ -99,9 +99,9 @@ "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg=" }, "chainpad-crypto": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/chainpad-crypto/-/chainpad-crypto-0.2.2.tgz", - "integrity": "sha512-7MJ7qPz/C4sJPsDhPMjdSRmliOCPoRO0XM1vUomcgXA6HINlW+if9AAt/H4q154nYhZ/b57njgC6cWgd/RDidg==", + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/chainpad-crypto/-/chainpad-crypto-0.2.4.tgz", + "integrity": "sha512-fWbVyeAv35vf/dkkQaefASlJcEfpEvfRI23Mtn+/TBBry7+LYNuJMXJiovVY35pfyw2+trKh1Py5Asg9vrmaVg==", "requires": { "tweetnacl": "git://github.com/dchest/tweetnacl-js.git#v0.12.2" }, @@ -113,14 +113,13 @@ } }, "chainpad-server": { - "version": "3.0.5", - "resolved": "https://registry.npmjs.org/chainpad-server/-/chainpad-server-3.0.5.tgz", - "integrity": "sha512-USKOMSHsNjnme81Qy3nQ+ji9eCkBPokYH4T82LVHAI0aayTSCXcTPUDLVGDBCRqe8NsXU4io1WPXn1KiZwB8fA==", + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/chainpad-server/-/chainpad-server-4.0.2.tgz", + "integrity": "sha512-9NrFsATd70uAdksxsCZBIJ/SiREmJ6QLYTNaeFLH/nJpeZ2b7wblVGABCj3JYWvngdEZ7Umc+afbWH8sUmtgeQ==", "requires": { - "nthen": "^0.1.8", + "nthen": "0.1.8", "pull-stream": "^3.6.9", "stream-to-pull-stream": "^1.7.3", - "tweetnacl": "~0.12.2", "ws": "^3.3.1" } }, @@ -161,9 +160,9 @@ "dev": true }, "commander": { - "version": "2.20.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.0.tgz", - "integrity": "sha512-7j2y+40w61zy6YC2iRNpUe/NwhNyoXrYpHMrSunaMG64nRnaf96zO/KMQR4OyN/UnE5KLyEBnKHd4aG3rskjpQ==", + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", "dev": true }, "concat-map": { @@ -241,9 +240,9 @@ } }, "dom-serializer": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.1.tgz", - "integrity": "sha512-sK3ujri04WyjwQXVoK4PU3y8ula1stq10GJZpqHIUgoGZdsGzAGu65BnU3d08aTVSvO7mGPZUc0wTEDL+qGE0Q==", + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz", + "integrity": 
"sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==", "dev": true, "requires": { "domelementtype": "^2.0.1", @@ -398,15 +397,9 @@ } }, "flatten": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/flatten/-/flatten-1.0.2.tgz", - "integrity": "sha1-2uRqnXj74lKSJYzB54CkHZXAN4I=", - "dev": true - }, - "flow-bin": { - "version": "0.59.0", - "resolved": "https://registry.npmjs.org/flow-bin/-/flow-bin-0.59.0.tgz", - "integrity": "sha512-yJDRffvby5mCTkbwOdXwiGDjeea8Z+BPVuP53/tHqHIZC+KtQD790zopVf7mHk65v+wRn+TZ7tkRSNA9oDmyLg==", + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/flatten/-/flatten-1.0.3.tgz", + "integrity": "sha512-dVsPA/UwQ8+2uoFe5GHtiBMu48dWLTdsuEd7CKGlZlD78r1TTWBvDuFaFGKCo/ZfEr95Uk56vZoX86OsHkUeIg==", "dev": true }, "forwarded": { @@ -450,9 +443,9 @@ } }, "glob": { - "version": "7.1.4", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.4.tgz", - "integrity": "sha512-hkLPepehmnKk41pUGm3sYxoFs/umurYfYJCerbXEyFIWcAzvpipAgVkBqqT9RBKMGjnq6kMuyYwha6csxbiM1A==", + "version": "7.1.6", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", + "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", "dev": true, "requires": { "fs.realpath": "^1.0.0", @@ -478,9 +471,9 @@ } }, "graceful-fs": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.2.tgz", - "integrity": "sha512-IItsdsea19BoLC7ELy13q1iJFNmd7ofZH5+X/pJr90/nRoPEX0DJo1dHDbgtYWOhJhcCgMDTOw84RZ72q6lB+Q==" + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.3.tgz", + "integrity": "sha512-a30VEBm4PEdx1dRB7MFK7BejejvCvBronbLjht+sHuGYj8PHs7M/5Z+rt5lw551vZ7yfTCj4Vuyy3mSJytDWRQ==" }, "has-ansi": { "version": "2.0.0", @@ -593,9 +586,9 @@ "dev": true }, "jshint": { - "version": "2.10.2", - "resolved": "https://registry.npmjs.org/jshint/-/jshint-2.10.2.tgz", - "integrity": "sha512-e7KZgCSXMJxznE/4WULzybCMNXNAd/bf5TSrvVEq78Q/K8ZwFpmBqQeDtNiHc3l49nV4E/+YeHU/JZjSUIrLAA==", + "version": "2.11.0", + "resolved": "https://registry.npmjs.org/jshint/-/jshint-2.11.0.tgz", + "integrity": "sha512-ooaD/hrBPhu35xXW4gn+o3SOuzht73gdBuffgJzrZBJZPGgGiiTvJEgTyxFvBO2nz0+X1G6etF8SzUODTlLY6Q==", "dev": true, "requires": { "cli": "~1.0.0", @@ -635,9 +628,9 @@ "dev": true }, "readable-stream": { - "version": "2.3.6", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz", - "integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==", + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", "dev": true, "requires": { "core-util-is": "~1.0.0", @@ -766,16 +759,16 @@ "integrity": "sha512-KI1+qOZu5DcW6wayYHSzR/tXKCDC5Om4s1z2QJjDULzLcmf3DvzS7oluY4HCTrc+9FiKmWUgeNLg7W3uIQvxtQ==" }, "mime-db": { - "version": "1.40.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.40.0.tgz", - "integrity": "sha512-jYdeOMPy9vnxEqFRRo6ZvTZ8d9oPb+k18PKoYNYUe2stVEBPPwsln/qWzdbmaIvnhZ9v2P+CuecK+fpUfsV2mA==" + "version": "1.43.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.43.0.tgz", + "integrity": "sha512-+5dsGEEovYbT8UY9yD7eE4XTc4UwJ1jBYlgaQQF38ENsKR3wj/8q8RFZrF9WIZpB2V1ArTVFUva8sAul1NzRzQ==" }, "mime-types": { - "version": "2.1.24", - "resolved": 
"https://registry.npmjs.org/mime-types/-/mime-types-2.1.24.tgz", - "integrity": "sha512-WaFHS3MCl5fapm3oLxU4eYDw77IQM2ACcxQ9RIxfaC3ooc6PFuBMGZZsYpvoXS5D5QTWPieo1jjLdAm3TBP3cQ==", + "version": "2.1.26", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.26.tgz", + "integrity": "sha512-01paPWYgLrkqAyrlDorC1uDwl2p3qZT7yl806vW7DvDoxwXi46jsjFbg+WdwotBIk6/MbEhO/dh5aZ5sNj/dWQ==", "requires": { - "mime-db": "1.40.0" + "mime-db": "1.43.0" } }, "minimatch": { @@ -848,9 +841,9 @@ "dev": true }, "pako": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.10.tgz", - "integrity": "sha512-0DTvPVU3ed8+HNXOu5Bs+o//Mbdj9VNQMUOe9oKCwh8l0GNwpTDMKCWbRjgtD291AWnkAgkqA/LOnQS8AmS1tw==", + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz", + "integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==", "dev": true }, "parseurl": { @@ -1305,19 +1298,19 @@ } }, "xml2js": { - "version": "0.4.19", - "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.4.19.tgz", - "integrity": "sha512-esZnJZJOiJR9wWKMyuvSE1y6Dq5LCuJanqhxslH2bxM6duahNZ+HMpCLhBQGZkbX6xRf8x1Y2eJlgt2q3qo49Q==", + "version": "0.4.23", + "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.4.23.tgz", + "integrity": "sha512-ySPiMjM0+pLDftHgXY4By0uswI3SPKLDw/i3UXbnO8M/p28zqexCUoPmQFrYD+/1BzhGJSs2i1ERWKJAtiLrug==", "dev": true, "requires": { "sax": ">=0.6.0", - "xmlbuilder": "~9.0.1" + "xmlbuilder": "~11.0.0" } }, "xmlbuilder": { - "version": "9.0.7", - "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-9.0.7.tgz", - "integrity": "sha1-Ey7mPS7FVlxVfiD0wi35rKaGsQ0=", + "version": "11.0.1", + "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-11.0.1.tgz", + "integrity": "sha512-fDlsI/kFEx7gLvbecc0/ohLG50fugQp8ryHzMTuW9vSa1GJ0XYWKnhsUx7oie3G98+r56aTQIUB4kht42R3JvA==", "dev": true } } diff --git a/package.json b/package.json index fa4353662..81be1c7f8 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "cryptpad", "description": "realtime collaborative visual editor with zero knowlege server", - "version": "3.10.0", + "version": "3.11.0", "license": "AGPL-3.0+", "repository": { "type": "git", @@ -13,7 +13,7 @@ }, "dependencies": { "chainpad-crypto": "^0.2.2", - "chainpad-server": "^3.0.5", + "chainpad-server": "^4.0.0", "express": "~4.16.0", "fs-extra": "^7.0.0", "get-folder-size": "^2.0.1", @@ -27,7 +27,6 @@ "ws": "^3.3.1" }, "devDependencies": { - "flow-bin": "^0.59.0", "jshint": "^2.10.2", "less": "2.7.1", "lesshint": "^4.5.0", @@ -40,8 +39,8 @@ "package": "PACKAGE=1 node server.js", "lint": "jshint --config .jshintrc --exclude-path .jshintignore . 
&& ./node_modules/lesshint/bin/lesshint -c ./.lesshintrc ./customize.dist/src/less2/", "lint:js": "jshint --config .jshintrc --exclude-path .jshintignore .", + "lint:server": "jshint --config .jshintrc lib", "lint:less": "./node_modules/lesshint/bin/lesshint -c ./.lesshintrc ./customize.dist/src/less2/", - "flow": "./node_modules/.bin/flow", "test": "node scripts/TestSelenium.js", "test-rpc": "cd scripts/tests && node test-rpc", "template": "cd customize.dist/src && for page in ../index.html ../privacy.html ../terms.html ../about.html ../contact.html ../what-is-cryptpad.html ../features.html ../../www/login/index.html ../../www/register/index.html ../../www/user/index.html;do echo $page; cp template.html $page; done;", diff --git a/rpc.js b/rpc.js deleted file mode 100644 index fcd85a390..000000000 --- a/rpc.js +++ /dev/null @@ -1,1766 +0,0 @@ -/*@flow*/ -/*jshint esversion: 6 */ -/* Use Nacl for checking signatures of messages */ -var Nacl = require("tweetnacl/nacl-fast"); - -/* globals Buffer*/ -/* globals process */ - -var Fs = require("fs"); - -var Fse = require("fs-extra"); -var Path = require("path"); -var Https = require("https"); -const Package = require('./package.json'); -const Pinned = require('./scripts/pinned'); -const Saferphore = require("saferphore"); -const nThen = require("nthen"); -const getFolderSize = require("get-folder-size"); -const Pins = require("./lib/pins"); -const Meta = require("./lib/metadata"); -const WriteQueue = require("./lib/write-queue"); -const BatchRead = require("./lib/batch-read"); - -const Util = require("./lib/common-util"); -const escapeKeyCharacters = Util.escapeKeyCharacters; -const unescapeKeyCharacters = Util.unescapeKeyCharacters; -const mkEvent = Util.mkEvent; - -var RPC = module.exports; - -var Store = require("./storage/file"); -var BlobStore = require("./storage/blob"); - -var DEFAULT_LIMIT = 50 * 1024 * 1024; -var SESSION_EXPIRATION_TIME = 60 * 1000; - -var Log; - -var WARN = function (e, output) { - if (e && output) { - Log.warn(e, { - output: output, - message: String(e), - stack: new Error(e).stack, - }); - } -}; - -var isValidId = function (chan) { - return chan && chan.length && /^[a-zA-Z0-9=+-]*$/.test(chan) && - [32, 48].indexOf(chan.length) > -1; -}; - -var makeToken = function () { - return Number(Math.floor(Math.random() * Number.MAX_SAFE_INTEGER)) - .toString(16); -}; - -var makeCookie = function (token) { - var time = (+new Date()); - time -= time % 5000; - - return [ - time, - process.pid, - token - ]; -}; - -var parseCookie = function (cookie) { - if (!(cookie && cookie.split)) { return null; } - - var parts = cookie.split('|'); - if (parts.length !== 3) { return null; } - - var c = {}; - c.time = new Date(parts[0]); - c.pid = Number(parts[1]); - c.seq = parts[2]; - return c; -}; - -var getSession = function (Sessions, key) { - var safeKey = escapeKeyCharacters(key); - if (Sessions[safeKey]) { - Sessions[safeKey].atime = +new Date(); - return Sessions[safeKey]; - } - var user = Sessions[safeKey] = {}; - user.atime = +new Date(); - user.tokens = [ - makeToken() - ]; - return user; -}; - -var isTooOld = function (time, now) { - return (now - time) > 300000; -}; - -var expireSession = function (Sessions, key) { - var session = Sessions[key]; - if (!session) { return; } - if (session.blobstage) { - session.blobstage.close(); - } - delete Sessions[key]; -}; - -var expireSessions = function (Sessions) { - var now = +new Date(); - Object.keys(Sessions).forEach(function (key) { - var session = Sessions[key]; - if (session && 
isTooOld(session.atime, now)) { - expireSession(Sessions, key); - } - }); -}; - -var addTokenForKey = function (Sessions, publicKey, token) { - if (!Sessions[publicKey]) { throw new Error('undefined user'); } - - var user = getSession(Sessions, publicKey); - user.tokens.push(token); - user.atime = +new Date(); - if (user.tokens.length > 2) { user.tokens.shift(); } -}; - -var isValidCookie = function (Sessions, publicKey, cookie) { - var parsed = parseCookie(cookie); - if (!parsed) { return false; } - - var now = +new Date(); - - if (!parsed.time) { return false; } - if (isTooOld(parsed.time, now)) { - return false; - } - - // different process. try harder - if (process.pid !== parsed.pid) { - return false; - } - - var user = getSession(Sessions, publicKey); - if (!user) { return false; } - - var idx = user.tokens.indexOf(parsed.seq); - if (idx === -1) { return false; } - - if (idx > 0) { - // make a new token - addTokenForKey(Sessions, publicKey, makeToken()); - } - - return true; -}; - -var checkSignature = function (signedMsg, signature, publicKey) { - if (!(signedMsg && publicKey)) { return false; } - - var signedBuffer; - var pubBuffer; - var signatureBuffer; - - try { - signedBuffer = Nacl.util.decodeUTF8(signedMsg); - } catch (e) { - Log.error('INVALID_SIGNED_BUFFER', signedMsg); - return null; - } - - try { - pubBuffer = Nacl.util.decodeBase64(publicKey); - } catch (e) { - return false; - } - - try { - signatureBuffer = Nacl.util.decodeBase64(signature); - } catch (e) { - return false; - } - - if (pubBuffer.length !== 32) { - Log.error('PUBLIC_KEY_LENGTH', publicKey); - return false; - } - - if (signatureBuffer.length !== 64) { - return false; - } - - return Nacl.sign.detached.verify(signedBuffer, signatureBuffer, pubBuffer); -}; - -const batchUserPins = BatchRead("LOAD_USER_PINS"); -var loadUserPins = function (Env, publicKey, cb) { - var session = getSession(Env.Sessions, publicKey); - - if (session.channels) { - return cb(session.channels); - } - - batchUserPins(publicKey, cb, function (done) { - var ref = {}; - var lineHandler = Pins.createLineHandler(ref, function (label, data) { - Log.error(label, { - log: publicKey, - data: data, - }); - }); - - // if channels aren't in memory. load them from disk - Env.pinStore.getMessages(publicKey, lineHandler, function () { - // no more messages - - // only put this into the cache if it completes - session.channels = ref.pins; - done(ref.pins); // FIXME no error handling? 
- }); - }); -}; - -var truthyKeys = function (O) { - return Object.keys(O).filter(function (k) { - return O[k]; - }); -}; - -var getChannelList = function (Env, publicKey, _cb) { - var cb = Util.once(Util.mkAsync(_cb)); - loadUserPins(Env, publicKey, function (pins) { - cb(truthyKeys(pins)); - }); -}; - -var getFileSize = function (Env, channel, _cb) { - var cb = Util.once(Util.mkAsync(_cb)); - if (!isValidId(channel)) { return void cb('INVALID_CHAN'); } - if (channel.length === 32) { - if (typeof(Env.msgStore.getChannelSize) !== 'function') { - return cb('GET_CHANNEL_SIZE_UNSUPPORTED'); - } - - return void Env.msgStore.getChannelSize(channel, function (e, size /*:number*/) { - if (e) { - if (e.code === 'ENOENT') { return void cb(void 0, 0); } - return void cb(e.code); - } - cb(void 0, size); - }); - } - - // 'channel' refers to a file, so you need another API - Env.blobStore.size(channel, function (e, size) { - if (typeof(size) === 'undefined') { return void cb(e); } - cb(void 0, size); - }); -}; - -const batchMetadata = BatchRead("GET_METADATA"); -var getMetadata = function (Env, channel, cb) { - if (!isValidId(channel)) { return void cb('INVALID_CHAN'); } - if (channel.length !== 32) { return cb("INVALID_CHAN_LENGTH"); } - - batchMetadata(channel, cb, function (done) { - var ref = {}; - var lineHandler = Meta.createLineHandler(ref, Log.error); - - return void Env.msgStore.readChannelMetadata(channel, lineHandler, function (err) { - if (err) { - // stream errors? - return void done(err); - } - done(void 0, ref.meta); - }); - }); -}; - -/* setMetadata - - write a new line to the metadata log if a valid command is provided - - data is an object: { - channel: channelId, - command: metadataCommand (string), - value: value - } -*/ -var queueMetadata = WriteQueue(); -var setMetadata = function (Env, data, unsafeKey, cb) { - var channel = data.channel; - var command = data.command; - if (!channel || !isValidId(channel)) { return void cb ('INVALID_CHAN'); } - if (!command || typeof (command) !== 'string') { return void cb ('INVALID_COMMAND'); } - if (Meta.commands.indexOf(command) === -1) { return void('UNSUPPORTED_COMMAND'); } - - queueMetadata(channel, function (next) { - getMetadata(Env, channel, function (err, metadata) { - if (err) { - cb(err); - return void next(); - } - if (!(metadata && Array.isArray(metadata.owners))) { - cb('E_NO_OWNERS'); - return void next(); - } - - // Confirm that the channel is owned by the user in question - // or the user is accepting a pending ownerhsip offer - if (metadata.pending_owners && Array.isArray(metadata.pending_owners) && - metadata.pending_owners.indexOf(unsafeKey) !== -1 && - metadata.owners.indexOf(unsafeKey) === -1) { - - // If you are a pending owner, make sure you can only add yourelf as an owner - if ((command !== 'ADD_OWNERS' && command !== 'RM_PENDING_OWNERS') - || !Array.isArray(data.value) - || data.value.length !== 1 - || data.value[0] !== unsafeKey) { - cb('INSUFFICIENT_PERMISSIONS'); - return void next(); - } - - } else if (metadata.owners.indexOf(unsafeKey) === -1) { - cb('INSUFFICIENT_PERMISSIONS'); - return void next(); - } - - // Add the new metadata line - var line = [command, data.value, +new Date()]; - var changed = false; - try { - changed = Meta.handleCommand(metadata, line); - } catch (e) { - cb(e); - return void next(); - } - - // if your command is valid but it didn't result in any change to the metadata, - // call back now and don't write any "useless" line to the log - if (!changed) { - cb(void 0, metadata); - return void 
next(); - } - Env.msgStore.writeMetadata(channel, JSON.stringify(line), function (e) { - if (e) { - cb(e); - return void next(); - } - cb(void 0, metadata); - next(); - }); - }); - }); -}; - -var getMultipleFileSize = function (Env, channels, cb) { - if (!Array.isArray(channels)) { return cb('INVALID_PIN_LIST'); } - if (typeof(Env.msgStore.getChannelSize) !== 'function') { - return cb('GET_CHANNEL_SIZE_UNSUPPORTED'); - } - - var i = channels.length; - var counts = {}; - - var done = function () { - i--; - if (i === 0) { return cb(void 0, counts); } - }; - - channels.forEach(function (channel) { - getFileSize(Env, channel, function (e, size) { - if (e) { - // most likely error here is that a file no longer exists - // but a user still has it in their drive, and wants to know - // its size. We should find a way to inform them of this in - // the future. For now we can just tell them it has no size. - - //WARN('getFileSize', e); - counts[channel] = 0; - return done(); - } - counts[channel] = size; - done(); - }); - }); -}; - -/* accepts a list, and returns a sublist of channel or file ids which seem - to have been deleted from the server (file size 0) - - we might consider that we should only say a file is gone if fs.stat returns - ENOENT, but for now it's simplest to just rely on getFileSize... -*/ -var getDeletedPads = function (Env, channels, cb) { - if (!Array.isArray(channels)) { return cb('INVALID_LIST'); } - var L = channels.length; - - var sem = Saferphore.create(10); - var absentees = []; - - var job = function (channel, wait) { - return function (give) { - getFileSize(Env, channel, wait(give(function (e, size) { - if (e) { return; } - if (size === 0) { absentees.push(channel); } - }))); - }; - }; - - nThen(function (w) { - for (var i = 0; i < L; i++) { - sem.take(job(channels[i], w)); - } - }).nThen(function () { - cb(void 0, absentees); - }); -}; - -const batchTotalSize = BatchRead("GET_TOTAL_SIZE"); -var getTotalSize = function (Env, publicKey, cb) { - var unescapedKey = unescapeKeyCharacters(publicKey); - var limit = Env.limits[unescapedKey]; - - // Get a common key if multiple users share the same quota, otherwise take the public key - var batchKey = (limit && Array.isArray(limit.users)) ? limit.users.join('') : publicKey; - - batchTotalSize(batchKey, cb, function (done) { - var channels = []; - var bytes = 0; - nThen(function (waitFor) { - // Get the channels list for our user account - getChannelList(Env, publicKey, waitFor(function (_channels) { - if (!_channels) { - waitFor.abort(); - return done('INVALID_PIN_LIST'); - } - Array.prototype.push.apply(channels, _channels); - })); - // Get the channels list for users sharing our quota - if (limit && Array.isArray(limit.users) && limit.users.length > 1) { - limit.users.forEach(function (key) { - if (key === unescapedKey) { return; } // Don't count ourselves twice - getChannelList(Env, key, waitFor(function (_channels) { - if (!_channels) { return; } // Broken user, don't count their quota - Array.prototype.push.apply(channels, _channels); - })); - }); - } - }).nThen(function (waitFor) { - // Get size of the channels - var list = []; // Contains the channels already counted in the quota to avoid duplicates - channels.forEach(function (channel) { // TODO semaphore? 
- if (list.indexOf(channel) !== -1) { return; } - list.push(channel); - getFileSize(Env, channel, waitFor(function (e, size) { - if (!e) { bytes += size; } - })); - }); - }).nThen(function () { - done(void 0, bytes); - }); - }); -}; - -var hashChannelList = function (A) { - var uniques = []; - - A.forEach(function (a) { - if (uniques.indexOf(a) === -1) { uniques.push(a); } - }); - uniques.sort(); - - var hash = Nacl.util.encodeBase64(Nacl.hash(Nacl - .util.decodeUTF8(JSON.stringify(uniques)))); - - return hash; -}; - -var getHash = function (Env, publicKey, cb) { - getChannelList(Env, publicKey, function (channels) { - cb(void 0, hashChannelList(channels)); - }); -}; - -var applyCustomLimits = function (Env, config) { - var isLimit = function (o) { - var valid = o && typeof(o) === 'object' && - typeof(o.limit) === 'number' && - typeof(o.plan) === 'string' && - typeof(o.note) === 'string'; - return valid; - }; - - // read custom limits from the config - var customLimits = (function (custom) { - var limits = {}; - Object.keys(custom).forEach(function (k) { - k.replace(/\/([^\/]+)$/, function (all, safeKey) { - var id = unescapeKeyCharacters(safeKey || ''); - limits[id] = custom[k]; - return ''; - }); - }); - return limits; - }(config.customLimits || {})); - - Object.keys(customLimits).forEach(function (k) { - if (!isLimit(customLimits[k])) { return; } - Env.limits[k] = customLimits[k]; - }); -}; - -// The limits object contains storage limits for all the publicKey that have paid -// To each key is associated an object containing the 'limit' value and a 'note' explaining that limit -var updateLimits = function (Env, config, publicKey, cb /*:(?string, ?any[])=>void*/) { // FIXME BATCH? - - if (config.adminEmail === false) { - applyCustomLimits(Env, config); - if (config.allowSubscriptions === false) { return; } - throw new Error("allowSubscriptions must be false if adminEmail is false"); - } - if (typeof cb !== "function") { cb = function () {}; } - - var defaultLimit = typeof(config.defaultStorageLimit) === 'number'? - config.defaultStorageLimit: DEFAULT_LIMIT; - - var userId; - if (publicKey) { - userId = unescapeKeyCharacters(publicKey); - } - - var body = JSON.stringify({ - domain: config.myDomain, - subdomain: config.mySubdomain || null, - adminEmail: config.adminEmail, - version: Package.version - }); - var options = { - host: 'accounts.cryptpad.fr', - path: '/api/getauthorized', - method: 'POST', - headers: { - "Content-Type": "application/json", - "Content-Length": Buffer.byteLength(body) - } - }; - - var req = Https.request(options, function (response) { - if (!('' + response.statusCode).match(/^2\d\d$/)) { - return void cb('SERVER ERROR ' + response.statusCode); - } - var str = ''; - - response.on('data', function (chunk) { - str += chunk; - }); - - response.on('end', function () { - try { - var json = JSON.parse(str); - Env.limits = json; - applyCustomLimits(Env, config); - - var l; - if (userId) { - var limit = Env.limits[userId]; - l = limit && typeof limit.limit === "number" ? - [limit.limit, limit.plan, limit.note] : [defaultLimit, '', '']; - } - cb(void 0, l); - } catch (e) { - cb(e); - } - }); - }); - - req.on('error', function (e) { - applyCustomLimits(Env, config); - if (!config.domain) { return cb(); } - cb(e); - }); - - req.end(body); -}; - -var getLimit = function (Env, publicKey, cb) { - var unescapedKey = unescapeKeyCharacters(publicKey); - var limit = Env.limits[unescapedKey]; - var defaultLimit = typeof(Env.defaultStorageLimit) === 'number'? 
- Env.defaultStorageLimit: DEFAULT_LIMIT; - - var toSend = limit && typeof(limit.limit) === "number"? - [limit.limit, limit.plan, limit.note] : [defaultLimit, '', '']; - - cb(void 0, toSend); -}; - -var getFreeSpace = function (Env, publicKey, cb) { - getLimit(Env, publicKey, function (e, limit) { - if (e) { return void cb(e); } - getTotalSize(Env, publicKey, function (e, size) { - if (typeof(size) === 'undefined') { return void cb(e); } - - var rem = limit[0] - size; - if (typeof(rem) !== 'number') { - return void cb('invalid_response'); - } - cb(void 0, rem); - }); - }); -}; - -var sumChannelSizes = function (sizes) { - return Object.keys(sizes).map(function (id) { return sizes[id]; }) - .filter(function (x) { - // only allow positive numbers - return !(typeof(x) !== 'number' || x <= 0); - }) - .reduce(function (a, b) { return a + b; }, 0); -}; - -// inform that the -var loadChannelPins = function (Env) { - Pinned.load(function (err, data) { - if (err) { - Log.error("LOAD_CHANNEL_PINS", err); - - // FIXME not sure what should be done here instead - Env.pinnedPads = {}; - Env.evPinnedPadsReady.fire(); - return; - } - - - Env.pinnedPads = data; - Env.evPinnedPadsReady.fire(); - }, { - pinPath: Env.paths.pin, - }); -}; -var addPinned = function ( - Env, - publicKey /*:string*/, - channelList /*Array*/, - cb /*:()=>void*/) -{ - Env.evPinnedPadsReady.reg(() => { - channelList.forEach((c) => { - const x = Env.pinnedPads[c] = Env.pinnedPads[c] || {}; - x[publicKey] = 1; - }); - cb(); - }); -}; -var removePinned = function ( - Env, - publicKey /*:string*/, - channelList /*Array*/, - cb /*:()=>void*/) -{ - Env.evPinnedPadsReady.reg(() => { - channelList.forEach((c) => { - const x = Env.pinnedPads[c]; - if (!x) { return; } - delete x[publicKey]; - }); - cb(); - }); -}; -var isChannelPinned = function (Env, channel, cb) { - Env.evPinnedPadsReady.reg(() => { - if (Env.pinnedPads[channel] && Object.keys(Env.pinnedPads[channel]).length) { - cb(true); - } else { - delete Env.pinnedPads[channel]; - cb(false); - } - }); -}; - -var pinChannel = function (Env, publicKey, channels, cb) { - if (!channels && channels.filter) { - return void cb('INVALID_PIN_LIST'); - } - - // get channel list ensures your session has a cached channel list - getChannelList(Env, publicKey, function (pinned) { - var session = getSession(Env.Sessions, publicKey); - - // only pin channels which are not already pinned - var toStore = channels.filter(function (channel) { - return pinned.indexOf(channel) === -1; - }); - - if (toStore.length === 0) { - return void getHash(Env, publicKey, cb); - } - - getMultipleFileSize(Env, toStore, function (e, sizes) { - if (typeof(sizes) === 'undefined') { return void cb(e); } - var pinSize = sumChannelSizes(sizes); - - getFreeSpace(Env, publicKey, function (e, free) { - if (typeof(free) === 'undefined') { - WARN('getFreeSpace', e); - return void cb(e); - } - if (pinSize > free) { return void cb('E_OVER_LIMIT'); } - - Env.pinStore.message(publicKey, JSON.stringify(['PIN', toStore, +new Date()]), - function (e) { - if (e) { return void cb(e); } - toStore.forEach(function (channel) { - session.channels[channel] = true; - }); - addPinned(Env, publicKey, toStore, () => {}); - getHash(Env, publicKey, cb); - }); - }); - }); - }); -}; - -var unpinChannel = function (Env, publicKey, channels, cb) { - if (!channels && channels.filter) { - // expected array - return void cb('INVALID_PIN_LIST'); - } - - getChannelList(Env, publicKey, function (pinned) { - var session = getSession(Env.Sessions, publicKey); - - 
// only unpin channels which are pinned - var toStore = channels.filter(function (channel) { - return pinned.indexOf(channel) !== -1; - }); - - if (toStore.length === 0) { - return void getHash(Env, publicKey, cb); - } - - Env.pinStore.message(publicKey, JSON.stringify(['UNPIN', toStore, +new Date()]), - function (e) { - if (e) { return void cb(e); } - toStore.forEach(function (channel) { - delete session.channels[channel]; - }); - removePinned(Env, publicKey, toStore, () => {}); - getHash(Env, publicKey, cb); - }); - }); -}; - -var resetUserPins = function (Env, publicKey, channelList, cb) { - if (!Array.isArray(channelList)) { return void cb('INVALID_PIN_LIST'); } - var session = getSession(Env.Sessions, publicKey); - - if (!channelList.length) { - return void getHash(Env, publicKey, function (e, hash) { - if (e) { return cb(e); } - cb(void 0, hash); - }); - } - - var pins = {}; - getMultipleFileSize(Env, channelList, function (e, sizes) { - if (typeof(sizes) === 'undefined') { return void cb(e); } - var pinSize = sumChannelSizes(sizes); - - - getLimit(Env, publicKey, function (e, limit) { - if (e) { - WARN('[RESET_ERR]', e); - return void cb(e); - } - - /* we want to let people pin, even if they are over their limit, - but they should only be able to do this once. - - This prevents data loss in the case that someone registers, but - does not have enough free space to pin their migrated data. - - They will not be able to pin additional pads until they upgrade - or delete enough files to go back under their limit. */ - if (pinSize > limit[0] && session.hasPinned) { return void(cb('E_OVER_LIMIT')); } - Env.pinStore.message(publicKey, JSON.stringify(['RESET', channelList, +new Date()]), - function (e) { - if (e) { return void cb(e); } - channelList.forEach(function (channel) { - pins[channel] = true; - }); - - var oldChannels; - if (session.channels && typeof(session.channels) === 'object') { - oldChannels = Object.keys(session.channels); - } else { - oldChannels = []; - } - removePinned(Env, publicKey, oldChannels, () => { - addPinned(Env, publicKey, channelList, ()=>{}); - }); - - // update in-memory cache IFF the reset was allowed. 
- session.channels = pins; - getHash(Env, publicKey, function (e, hash) { - cb(e, hash); - }); - }); - }); - }); -}; - -var clearOwnedChannel = function (Env, channelId, unsafeKey, cb) { - if (typeof(channelId) !== 'string' || channelId.length !== 32) { - return cb('INVALID_ARGUMENTS'); - } - - getMetadata(Env, channelId, function (err, metadata) { - if (err) { return void cb(err); } - if (!(metadata && Array.isArray(metadata.owners))) { return void cb('E_NO_OWNERS'); } - // Confirm that the channel is owned by the user in question - if (metadata.owners.indexOf(unsafeKey) === -1) { - return void cb('INSUFFICIENT_PERMISSIONS'); - } - // FIXME COLDSTORAGE - return void Env.msgStore.clearChannel(channelId, function (e) { - cb(e); - }); - }); -}; - -var removeOwnedChannel = function (Env, channelId, unsafeKey, cb) { - if (typeof(channelId) !== 'string' || !isValidId(channelId)) { - return cb('INVALID_ARGUMENTS'); - } - - if (Env.blobStore.isFileId(channelId)) { - var safeKey = escapeKeyCharacters(unsafeKey); - var blobId = channelId; - - return void nThen(function (w) { - // check if you have permissions - Env.blobStore.isOwnedBy(safeKey, blobId, w(function (err, owned) { - if (err || !owned) { - w.abort(); - return void cb("INSUFFICIENT_PERMISSIONS"); - } - })); - }).nThen(function (w) { - // remove the blob - - if (Env.retainData) { - return void Env.blobStore.archive.blob(blobId, w(function (err) { - Log.info('ARCHIVAL_OWNED_FILE_BY_OWNER_RPC', { - safeKey: safeKey, - blobId: blobId, - status: err? String(err): 'SUCCESS', - }); - if (err) { - w.abort(); - return void cb(err); - } - })); - } - Env.blobStore.remove.blob(blobId, w(function (err) { - Log.info('DELETION_OWNED_FILE_BY_OWNER_RPC', { - safeKey: safeKey, - blobId: blobId, - status: err? String(err): 'SUCCESS', - }); - if (err) { - w.abort(); - return void cb(err); - } - })); - }).nThen(function () { - // remove the proof - if (Env.retainData) { - return void Env.blobStore.archive.proof(safeKey, blobId, function (err) { - Log.info("ARCHIVAL_PROOF_REMOVAL_BY_OWNER_RPC", { - safeKey: safeKey, - blobId: blobId, - status: err? String(err): 'SUCCESS', - }); - if (err) { - return void cb("E_PROOF_REMOVAL"); - } - cb(); - }); - } - - Env.blobStore.remove.proof(safeKey, blobId, function (err) { - Log.info("DELETION_PROOF_REMOVAL_BY_OWNER_RPC", { - safeKey: safeKey, - blobId: blobId, - status: err? String(err): 'SUCCESS', - }); - if (err) { - return void cb("E_PROOF_REMOVAL"); - } - cb(); - }); - }); - } - - getMetadata(Env, channelId, function (err, metadata) { - if (err) { return void cb(err); } - if (!(metadata && Array.isArray(metadata.owners))) { return void cb('E_NO_OWNERS'); } - if (metadata.owners.indexOf(unsafeKey) === -1) { - return void cb('INSUFFICIENT_PERMISSIONS'); - } - // if the admin has configured data retention... - // temporarily archive the file instead of removing it - if (Env.retainData) { - return void Env.msgStore.archiveChannel(channelId, function (e) { - Log.info('ARCHIVAL_CHANNEL_BY_OWNER_RPC', { - unsafeKey: unsafeKey, - channelId: channelId, - status: e? String(e): 'SUCCESS', - }); - cb(e); - }); - } - - return void Env.msgStore.removeChannel(channelId, function (e) { - Log.info('DELETION_CHANNEL_BY_OWNER_RPC', { - unsafeKey: unsafeKey, - channelId: channelId, - status: e? 
String(e): 'SUCCESS', - }); - cb(e); - }); - }); -}; - -/* Users should be able to clear their own pin log with an authenticated RPC -*/ -var removePins = function (Env, safeKey, cb) { - if (typeof(Env.pinStore.removeChannel) !== 'function') { - return void cb("E_NOT_IMPLEMENTED"); - } - Env.pinStore.removeChannel(safeKey, function (err) { - Log.info('DELETION_PIN_BY_OWNER_RPC', { - safeKey: safeKey, - status: err? String(err): 'SUCCESS', - }); - - cb(err); - }); -}; - -/* - We assume that the server is secured against MitM attacks - via HTTPS, and that malicious actors do not have code execution - capabilities. If they do, we have much more serious problems. - - The capability to replay a block write or remove results in either - a denial of service for the user whose block was removed, or in the - case of a write, a rollback to an earlier password. - - Since block modification is destructive, this can result in loss - of access to the user's drive. - - So long as the detached signature is never observed by a malicious - party, and the server discards it after proof of knowledge, replays - are not possible. However, this precludes verification of the signature - at a later time. - - Despite this, an integrity check is still possible by the original - author of the block, since we assume that the block will have been - encrypted with xsalsa20-poly1305 which is authenticated. -*/ -var validateLoginBlock = function (Env, publicKey, signature, block, cb) { // FIXME BLOCKS - // convert the public key to a Uint8Array and validate it - if (typeof(publicKey) !== 'string') { return void cb('E_INVALID_KEY'); } - - var u8_public_key; - try { - u8_public_key = Nacl.util.decodeBase64(publicKey); - } catch (e) { - return void cb('E_INVALID_KEY'); - } - - var u8_signature; - try { - u8_signature = Nacl.util.decodeBase64(signature); - } catch (e) { - Log.error('INVALID_BLOCK_SIGNATURE', e); - return void cb('E_INVALID_SIGNATURE'); - } - - // convert the block to a Uint8Array - var u8_block; - try { - u8_block = Nacl.util.decodeBase64(block); - } catch (e) { - return void cb('E_INVALID_BLOCK'); - } - - // take its hash - var hash = Nacl.hash(u8_block); - - // validate the signature against the hash of the content - var verified = Nacl.sign.detached.verify(hash, u8_signature, u8_public_key); - - // existing authentication ensures that users cannot replay old blocks - - // call back with (err) if unsuccessful - if (!verified) { return void cb("E_COULD_NOT_VERIFY"); } - - return void cb(null, u8_block); -}; - -var createLoginBlockPath = function (Env, publicKey) { // FIXME BLOCKS - // prepare publicKey to be used as a file name - var safeKey = escapeKeyCharacters(publicKey); - - // validate safeKey - if (typeof(safeKey) !== 'string') { - return; - } - - // derive the full path - // /home/cryptpad/cryptpad/block/fg/fg32kefksjdgjkewrjksdfksjdfsdfskdjfsfd - return Path.join(Env.paths.block, safeKey.slice(0, 2), safeKey); -}; - -var writeLoginBlock = function (Env, msg, cb) { // FIXME BLOCKS - //console.log(msg); - var publicKey = msg[0]; - var signature = msg[1]; - var block = msg[2]; - - validateLoginBlock(Env, publicKey, signature, block, function (e, validatedBlock) { - if (e) { return void cb(e); } - if (!(validatedBlock instanceof Uint8Array)) { return void cb('E_INVALID_BLOCK'); } - - // derive the filepath - var path = createLoginBlockPath(Env, publicKey); - - // make sure the path is valid - if (typeof(path) !== 'string') { - return void cb('E_INVALID_BLOCK_PATH'); - } - - var parsed = Path.parse(path); 
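// NOTE: a minimal sketch (not part of this module) of how a client could
// produce a triple which validateLoginBlock will accept, assuming the same
// tweetnacl API used above; 'payload' is hypothetical:
//
//     var keys = Nacl.sign.keyPair();
//     var block = Nacl.util.decodeUTF8(JSON.stringify(payload));
//     var sig = Nacl.sign.detached(Nacl.hash(block), keys.secretKey);
//     var msg = [
//         Nacl.util.encodeBase64(keys.publicKey),
//         Nacl.util.encodeBase64(sig),
//         Nacl.util.encodeBase64(block),
//     ];
//
// the signature covers the 64-byte hash of the block rather than the block
// itself, which keeps the signed payload small for arbitrarily large blocks.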
- if (!parsed || typeof(parsed.dir) !== 'string') { - return void cb("E_INVALID_BLOCK_PATH_2"); - } - - nThen(function (w) { - // make sure the path to the file exists - Fse.mkdirp(parsed.dir, w(function (e) { - if (e) { - w.abort(); - cb(e); - } - })); - }).nThen(function () { - // actually write the block - - // flow is dumb and I need to guard against this which will never happen - /*:: if (typeof(validatedBlock) === 'undefined') { throw new Error('should never happen'); } */ - /*:: if (typeof(path) === 'undefined') { throw new Error('should never happen'); } */ - Fs.writeFile(path, Buffer.from(validatedBlock), { encoding: "binary", }, function (err) { - if (err) { return void cb(err); } - cb(); - }); - }); - }); -}; - -/* - When users write a block, they upload the block, and provide - a signature proving that they deserve to be able to write to - the location determined by the public key. - - When removing a block, there is nothing to upload, but we need - to sign something. Since the signature is considered sensitive - information, we can just sign some constant and use that as proof. - -*/ -var removeLoginBlock = function (Env, msg, cb) { // FIXME BLOCKS - var publicKey = msg[0]; - var signature = msg[1]; - var block = Nacl.util.decodeUTF8('DELETE_BLOCK'); // clients and the server will have to agree on this constant - - validateLoginBlock(Env, publicKey, signature, block, function (e /*::, validatedBlock */) { - if (e) { return void cb(e); } - // derive the filepath - var path = createLoginBlockPath(Env, publicKey); - - // make sure the path is valid - if (typeof(path) !== 'string') { - return void cb('E_INVALID_BLOCK_PATH'); - } - - // FIXME COLDSTORAGE - Fs.unlink(path, function (err) { - Log.info('DELETION_BLOCK_BY_OWNER_RPC', { - publicKey: publicKey, - path: path, - status: err? String(err): 'SUCCESS', - }); - - if (err) { return void cb(err); } - cb(); - }); - }); -}; - -var ARRAY_LINE = /^\[/; - -/* Files can contain metadata but not content - call back with true if the channel log has no content other than metadata - otherwise false -*/ -var isNewChannel = function (Env, channel, cb) { - if (!isValidId(channel)) { return void cb('INVALID_CHAN'); } - if (channel.length !== 32) { return void cb('INVALID_CHAN'); } - - var done = false; - Env.msgStore.getMessages(channel, function (msg) { - if (done) { return; } - try { - if (typeof(msg) === 'string' && ARRAY_LINE.test(msg)) { - done = true; - return void cb(void 0, false); - } - } catch (e) { - WARN('invalid message read from store', e); - } - }, function () { - if (done) { return; } - // no more messages... 
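// NOTE: reaching this point means no line matched ARRAY_LINE (/^\[/).
// log lines are JSON, so metadata lines are objects beginning with '{'
// while messages are arrays beginning with '['; for example (contents
// hypothetical):
//
//     {"validateKey":"..."}                      // metadata only => "new"
//     [0,null,"MSG","<channelId>","<payload>"]   // a message => not new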
- cb(void 0, true); - }); -}; - -/* writePrivateMessage - allows users to anonymously send a message to the channel - prevents their netflux-id from being stored in history - and from being broadcast to anyone that might currently be in the channel - - Otherwise behaves the same as sending to a channel -*/ -var writePrivateMessage = function (Env, args, nfwssCtx, cb) { - var channelId = args[0]; - var msg = args[1]; - - // don't bother handling empty messages - if (!msg) { return void cb("INVALID_MESSAGE"); } - - // don't support anything except regular channels - if (!isValidId(channelId) || channelId.length !== 32) { - return void cb("INVALID_CHAN"); - } - - // We expect a modern netflux-websocket-server instance - // if this API isn't here everything will fall apart anyway - if (!(nfwssCtx && nfwssCtx.historyKeeper && typeof(nfwssCtx.historyKeeper.onChannelMessage) === 'function')) { - return void cb("NOT_IMPLEMENTED"); - } - - // historyKeeper expects something with an 'id' attribute - // it will fail unless you provide it, but it doesn't need anything else - var channelStruct = { - id: channelId, - }; - - // construct a message to store and broadcast - var fullMessage = [ - 0, // idk - null, // normally the netflux id, null isn't rejected, and it distinguishes messages written in this way - "MSG", // indicate that this is a MSG - channelId, // channel id - msg // the actual message content. Generally a string - ]; - - // store the message and do everything else that is typically done when going through historyKeeper - nfwssCtx.historyKeeper.onChannelMessage(nfwssCtx, channelStruct, fullMessage); - - // call back with the message and the target channel. - // historyKeeper will take care of broadcasting it if anyone is in the channel - cb(void 0, { - channel: channelId, - message: fullMessage - }); -}; - -const batchDiskUsage = BatchRead("GET_DISK_USAGE"); -var getDiskUsage = function (Env, cb) { - batchDiskUsage('', cb, function (done) { - var data = {}; - nThen(function (waitFor) { - getFolderSize('./', waitFor(function(err, info) { - data.total = info; - })); - getFolderSize(Env.paths.pin, waitFor(function(err, info) { - data.pin = info; - })); - getFolderSize(Env.paths.blob, waitFor(function(err, info) { - data.blob = info; - })); - getFolderSize(Env.paths.staging, waitFor(function(err, info) { - data.blobstage = info; - })); - getFolderSize(Env.paths.block, waitFor(function(err, info) { - data.block = info; - })); - getFolderSize(Env.paths.data, waitFor(function(err, info) { - data.datastore = info; - })); - }).nThen(function () { - done(void 0, data); - }); - }); -}; - -const batchRegisteredUsers = BatchRead("GET_REGISTERED_USERS"); -var getRegisteredUsers = function (Env, cb) { - batchRegisteredUsers('', cb, function (done) { - var dir = Env.paths.pin; - var folders; - var users = 0; - nThen(function (waitFor) { - Fs.readdir(dir, waitFor(function (err, list) { - if (err) { - waitFor.abort(); - return void done(err); - } - folders = list; - })); - }).nThen(function (waitFor) { - folders.forEach(function (f) { - var dir = Env.paths.pin + '/' + f; - Fs.readdir(dir, waitFor(function (err, list) { - if (err) { return; } - users += list.length; - })); - }); - }).nThen(function () { - done(void 0, users); - }); - }); -}; -var getActiveSessions = function (Env, ctx, cb) { - var total = ctx.users ? 
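// NOTE: judging from their use here, the BatchRead wrappers above coalesce
// concurrent calls made with the same key ('' in both cases): the first
// caller runs the expensive body and every callback registered before
// done() fires receives the same result, e.g.:
//
//     getDiskUsage(Env, cb1); // triggers the folder-size scan
//     getDiskUsage(Env, cb2); // shares the answer from the scan above
//
// so repeated admin-panel queries should not multiply disk I/O.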
Object.keys(ctx.users).length : '?'; - - var ips = []; - Object.keys(ctx.users).forEach(function (u) { - var user = ctx.users[u]; - var socket = user.socket; - var req = socket.upgradeReq; - var conn = req && req.connection; - var ip = (req && req.headers && req.headers['x-forwarded-for']) || (conn && conn.remoteAddress); - if (ip && ips.indexOf(ip) === -1) { - ips.push(ip); - } - }); - - cb (void 0, [total, ips.length]); -}; - -var adminCommand = function (Env, ctx, publicKey, config, data, cb) { - var admins = Env.admins; - if (admins.indexOf(publicKey) === -1) { - return void cb("FORBIDDEN"); - } - // Handle commands here - switch (data[0]) { - case 'ACTIVE_SESSIONS': - return getActiveSessions(Env, ctx, cb); - case 'ACTIVE_PADS': - return cb(void 0, ctx.channels ? Object.keys(ctx.channels).length : '?'); - case 'REGISTERED_USERS': - return getRegisteredUsers(Env, cb); - case 'DISK_USAGE': - return getDiskUsage(Env, cb); - case 'FLUSH_CACHE': - config.flushCache(); - return cb(void 0, true); - default: - return cb('UNHANDLED_ADMIN_COMMAND'); - } -}; - -var isUnauthenticatedCall = function (call) { - return [ - 'GET_FILE_SIZE', - 'GET_METADATA', - 'GET_MULTIPLE_FILE_SIZE', - 'IS_CHANNEL_PINNED', - 'IS_NEW_CHANNEL', - 'GET_HISTORY_OFFSET', - 'GET_DELETED_PADS', - 'WRITE_PRIVATE_MESSAGE', - ].indexOf(call) !== -1; -}; - -var isAuthenticatedCall = function (call) { - return [ - 'COOKIE', - 'RESET', - 'PIN', - 'UNPIN', - 'GET_HASH', - 'GET_TOTAL_SIZE', - 'UPDATE_LIMITS', - 'GET_LIMIT', - 'UPLOAD_STATUS', - 'UPLOAD_COMPLETE', - 'OWNED_UPLOAD_COMPLETE', - 'UPLOAD_CANCEL', - 'EXPIRE_SESSION', - 'CLEAR_OWNED_CHANNEL', - 'REMOVE_OWNED_CHANNEL', - 'REMOVE_PINS', - 'WRITE_LOGIN_BLOCK', - 'REMOVE_LOGIN_BLOCK', - 'ADMIN', - 'SET_METADATA' - ].indexOf(call) !== -1; -}; - -// upload_status -var upload_status = function (Env, safeKey, filesize, _cb) { // FIXME FILES - var cb = Util.once(Util.mkAsync(_cb)); - - // validate that the provided size is actually a positive number - if (typeof(filesize) !== 'number' && - filesize >= 0) { return void cb('E_INVALID_SIZE'); } - - if (filesize >= Env.maxUploadSize) { return cb('TOO_LARGE'); } - - nThen(function (w) { - var abortAndCB = Util.both(w.abort, cb); - Env.blobStore.status(safeKey, w(function (err, inProgress) { - // if there's an error something is weird - if (err) { return void abortAndCB(err); } - - // we cannot upload two things at once - if (inProgress) { return void abortAndCB(void 0, true); } - })); - }).nThen(function () { - // if yuo're here then there are no pending uploads - // check if you have space in your quota to upload something of this size - getFreeSpace(Env, safeKey, function (e, free) { - if (e) { return void cb(e); } - if (filesize >= free) { return cb('NOT_ENOUGH_SPACE'); } - cb(void 0, false); - }); - }); -}; - -/*:: -const flow_Config = require('./config.example.js'); -type Config_t = typeof(flow_Config); -import type { ChainPadServer_Storage_t } from './storage/file.js' -type NetfluxWebsocketSrvContext_t = { - store: ChainPadServer_Storage_t, - getHistoryOffset: ( - ctx: NetfluxWebsocketSrvContext_t, - channelName: string, - lastKnownHash: ?string, - cb: (err: ?Error, offset: ?number)=>void - )=>void -}; -*/ -RPC.create = function ( - config /*:Config_t*/, - cb /*:(?Error, ?Function)=>void*/ -) { - Log = config.log; - - // load pin-store... - Log.silly('LOADING RPC MODULE'); - - var keyOrDefaultString = function (key, def) { - return typeof(config[key]) === 'string'? 
config[key]: def; - }; - - var Env = { - retainData: config.retainData || false, - defaultStorageLimit: config.defaultStorageLimit, - maxUploadSize: config.maxUploadSize || (20 * 1024 * 1024), - Sessions: {}, - paths: {}, - msgStore: config.store, - pinStore: (undefined /*:any*/), - pinnedPads: {}, - evPinnedPadsReady: mkEvent(true), - limits: {}, - admins: [], - }; - - try { - Env.admins = (config.adminKeys || []).map(function (k) { - k = k.replace(/\/+$/, ''); - var s = k.split('/'); - return s[s.length-1]; - }); - } catch (e) { - console.error("Can't parse admin keys. Please update or fix your config.js file!"); - } - - var Sessions = Env.Sessions; - var paths = Env.paths; - var pinPath = paths.pin = keyOrDefaultString('pinPath', './pins'); - paths.block = keyOrDefaultString('blockPath', './block'); - paths.data = keyOrDefaultString('filePath', './datastore'); - paths.staging = keyOrDefaultString('blobStagingPath', './blobstage'); - paths.blob = keyOrDefaultString('blobPath', './blob'); - - var isUnauthenticateMessage = function (msg) { - return msg && msg.length === 2 && isUnauthenticatedCall(msg[0]); - }; - - var handleUnauthenticatedMessage = function (msg, respond, nfwssCtx) { - Log.silly('LOG_RPC', msg[0]); - switch (msg[0]) { - case 'GET_HISTORY_OFFSET': { - if (typeof(msg[1]) !== 'object' || typeof(msg[1].channelName) !== 'string') { - return respond('INVALID_ARG_FORMAT', msg); - } - const msgHash = typeof(msg[1].msgHash) === 'string' ? msg[1].msgHash : undefined; - nfwssCtx.getHistoryOffset(nfwssCtx, msg[1].channelName, msgHash, (e, ret) => { - if (e) { - if (e.code !== 'ENOENT') { - WARN(e.stack, msg); - } - return respond(e.message); - } - respond(e, [null, ret, null]); - }); - break; - } - case 'GET_FILE_SIZE': - return void getFileSize(Env, msg[1], function (e, size) { - WARN(e, msg[1]); - respond(e, [null, size, null]); - }); - case 'GET_METADATA': - return void getMetadata(Env, msg[1], function (e, data) { - WARN(e, msg[1]); - respond(e, [null, data, null]); - }); - case 'GET_MULTIPLE_FILE_SIZE': - return void getMultipleFileSize(Env, msg[1], function (e, dict) { - if (e) { - WARN(e, dict); - return respond(e); - } - respond(e, [null, dict, null]); - }); - case 'GET_DELETED_PADS': - return void getDeletedPads(Env, msg[1], function (e, list) { - if (e) { - WARN(e, msg[1]); - return respond(e); - } - respond(e, [null, list, null]); - }); - case 'IS_CHANNEL_PINNED': - return void isChannelPinned(Env, msg[1], function (isPinned) { - respond(null, [null, isPinned, null]); - }); - case 'IS_NEW_CHANNEL': - return void isNewChannel(Env, msg[1], function (e, isNew) { - respond(e, [null, isNew, null]); - }); - case 'WRITE_PRIVATE_MESSAGE': - return void writePrivateMessage(Env, msg[1], nfwssCtx, function (e, output) { - respond(e, output); - }); - default: - Log.warn("UNSUPPORTED_RPC_CALL", msg); - return respond('UNSUPPORTED_RPC_CALL', msg); - } - }; - - var rpc0 = function (ctx, data, respond) { - if (!Array.isArray(data)) { - Log.debug('INVALID_ARG_FORMET', data); - return void respond('INVALID_ARG_FORMAT'); - } - - if (!data.length) { - return void respond("INSUFFICIENT_ARGS"); - } else if (data.length !== 1) { - Log.debug('UNEXPECTED_ARGUMENTS_LENGTH', data); - } - - var msg = data[0].slice(0); - - if (!Array.isArray(msg)) { - return void respond('INVALID_ARG_FORMAT'); - } - - if (isUnauthenticateMessage(msg)) { - return handleUnauthenticatedMessage(msg, respond, ctx); - } - - var signature = msg.shift(); - var publicKey = msg.shift(); - - // make sure a user object is 
initialized in the cookie jar - if (publicKey) { - getSession(Sessions, publicKey); - } else { - Log.debug("NO_PUBLIC_KEY_PROVIDED", publicKey); - } - - var cookie = msg[0]; - if (!isValidCookie(Sessions, publicKey, cookie)) { - // no cookie is fine if the RPC is to get a cookie - if (msg[1] !== 'COOKIE') { - return void respond('NO_COOKIE'); - } - } - - var serialized = JSON.stringify(msg); - - if (!(serialized && typeof(publicKey) === 'string')) { - return void respond('INVALID_MESSAGE_OR_PUBLIC_KEY'); - } - - if (isAuthenticatedCall(msg[1])) { - if (checkSignature(serialized, signature, publicKey) !== true) { - return void respond("INVALID_SIGNATURE_OR_PUBLIC_KEY"); - } - } else if (msg[1] !== 'UPLOAD') { - Log.warn('INVALID_RPC_CALL', msg[1]); - return void respond("INVALID_RPC_CALL"); - } - - var safeKey = escapeKeyCharacters(publicKey); - /* If you have gotten this far, you have signed the message with the - public key which you provided. - - We can safely modify the state for that key - - OR it's an unauthenticated call, which must not modify the state - for that key in a meaningful way. - */ - - // discard validated cookie from message - msg.shift(); - - var Respond = function (e, msg) { - var session = Sessions[safeKey]; - var token = session? session.tokens.slice(-1)[0]: ''; - var cookie = makeCookie(token).join('|'); - respond(e ? String(e): e, [cookie].concat(typeof(msg) !== 'undefined' ?msg: [])); - }; - - if (typeof(msg) !== 'object' || !msg.length) { - return void Respond('INVALID_MSG'); - } - - var handleMessage = function () { - Log.silly('LOG_RPC', msg[0]); - switch (msg[0]) { - case 'COOKIE': return void Respond(void 0); - case 'RESET': - return resetUserPins(Env, safeKey, msg[1], function (e, hash) { - //WARN(e, hash); - return void Respond(e, hash); - }); - case 'PIN': - return pinChannel(Env, safeKey, msg[1], function (e, hash) { - WARN(e, hash); - Respond(e, hash); - }); - case 'UNPIN': - return unpinChannel(Env, safeKey, msg[1], function (e, hash) { - WARN(e, hash); - Respond(e, hash); - }); - case 'GET_HASH': - return void getHash(Env, safeKey, function (e, hash) { - WARN(e, hash); - Respond(e, hash); - }); - case 'GET_TOTAL_SIZE': // TODO cache this, since it will get called quite a bit - return getTotalSize(Env, safeKey, function (e, size) { - if (e) { - WARN(e, safeKey); - return void Respond(e); - } - Respond(e, size); - }); - case 'GET_FILE_SIZE': - return void getFileSize(Env, msg[1], function (e, size) { - WARN(e, msg[1]); - Respond(e, size); - }); - case 'UPDATE_LIMITS': - return void updateLimits(Env, config, safeKey, function (e, limit) { - if (e) { - WARN(e, limit); - return void Respond(e); - } - Respond(void 0, limit); - }); - case 'GET_LIMIT': - return void getLimit(Env, safeKey, function (e, limit) { - if (e) { - WARN(e, limit); - return void Respond(e); - } - Respond(void 0, limit); - }); - case 'GET_MULTIPLE_FILE_SIZE': - return void getMultipleFileSize(Env, msg[1], function (e, dict) { - if (e) { - WARN(e, dict); - return void Respond(e); - } - Respond(void 0, dict); - }); - case 'EXPIRE_SESSION': - return void setTimeout(function () { - expireSession(Sessions, safeKey); - Respond(void 0, "OK"); - }); - case 'CLEAR_OWNED_CHANNEL': - return void clearOwnedChannel(Env, msg[1], publicKey, function (e, response) { - if (e) { return void Respond(e); } - Respond(void 0, response); - }); - - case 'REMOVE_OWNED_CHANNEL': - return void removeOwnedChannel(Env, msg[1], publicKey, function (e) { - if (e) { return void Respond(e); } - Respond(void 0, "OK"); - 
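// NOTE: putting the dispatch above together, an authenticated call arrives
// as one flat array (values hypothetical):
//
//     [ sig, pubKey, cookie, 'PIN', ['chanA', 'chanB'] ]
//
// where sig must verify under pubKey against the JSON of everything after
// the key, i.e. JSON.stringify([cookie, 'PIN', [...]]); once the cookie has
// been validated and shifted off, msg[0] is the command name this switch
// matches on.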
}); - case 'REMOVE_PINS': - return void removePins(Env, safeKey, function (e) { - if (e) { return void Respond(e); } - Respond(void 0, "OK"); - }); - case 'UPLOAD': - return void Env.blobStore.upload(safeKey, msg[1], function (e, len) { - WARN(e, len); - Respond(e, len); - }); - case 'UPLOAD_STATUS': - var filesize = msg[1]; - return void upload_status(Env, safeKey, filesize, function (e, yes) { - if (!e && !yes) { - // no pending uploads, set the new size - var user = getSession(Sessions, safeKey); - user.pendingUploadSize = filesize; - user.currentUploadSize = 0; - } - Respond(e, yes); - }); - case 'UPLOAD_COMPLETE': - return void Env.blobStore.complete(safeKey, msg[1], function (e, hash) { - WARN(e, hash); - Respond(e, hash); - }); - case 'OWNED_UPLOAD_COMPLETE': - return void Env.blobStore.completeOwned(safeKey, msg[1], function (e, blobId) { - WARN(e, blobId); - Respond(e, blobId); - }); - case 'UPLOAD_CANCEL': - // msg[1] is fileSize - // if we pass it here, we can start an upload right away without calling - // UPLOAD_STATUS again - return void Env.blobStore.cancel(safeKey, msg[1], function (e) { - WARN(e, 'UPLOAD_CANCEL'); - Respond(e); - }); - case 'WRITE_LOGIN_BLOCK': - return void writeLoginBlock(Env, msg[1], function (e) { - if (e) { - WARN(e, 'WRITE_LOGIN_BLOCK'); - return void Respond(e); - } - Respond(e); - }); - case 'REMOVE_LOGIN_BLOCK': - return void removeLoginBlock(Env, msg[1], function (e) { - if (e) { - WARN(e, 'REMOVE_LOGIN_BLOCK'); - return void Respond(e); - } - Respond(e); - }); - case 'ADMIN': - return void adminCommand(Env, ctx, safeKey, config, msg[1], function (e, result) { - if (e) { - WARN(e, result); - return void Respond(e); - } - Respond(void 0, result); - }); - case 'SET_METADATA': - return void setMetadata(Env, msg[1], publicKey, function (e, data) { - if (e) { - WARN(e, data); - return void Respond(e); - } - Respond(void 0, data); - }); - default: - return void Respond('UNSUPPORTED_RPC_CALL', msg); - } - }; - - handleMessage(true); - }; - - var rpc = function ( - ctx /*:NetfluxWebsocketSrvContext_t*/, - data /*:Array>*/, - respond /*:(?string, ?Array)=>void*/) - { - try { - return rpc0(ctx, data, respond); - } catch (e) { - console.log("Error from RPC with data " + JSON.stringify(data)); - console.log(e.stack); - } - }; - - var updateLimitDaily = function () { - updateLimits(Env, config, undefined, function (e) { - if (e) { - WARN('limitUpdate', e); - } - }); - }; - updateLimitDaily(); - setInterval(updateLimitDaily, 24*3600*1000); - - loadChannelPins(Env); - - nThen(function (w) { - Store.create({ - filePath: pinPath, - }, w(function (s) { - Env.pinStore = s; - })); - BlobStore.create({ - blobPath: config.blobPath, - blobStagingPath: config.blobStagingPath, - archivePath: config.archivePath, - getSession: function (safeKey) { - return getSession(Sessions, safeKey); - }, - }, w(function (err, blob) { - if (err) { throw new Error(err); } - Env.blobStore = blob; - })); - }).nThen(function () { - cb(void 0, rpc); - // expire old sessions once per minute - setInterval(function () { - expireSessions(Sessions); - }, SESSION_EXPIRATION_TIME); - }); -}; diff --git a/scripts/evict-inactive.js b/scripts/evict-inactive.js index 13028b8ff..f0e801909 100644 --- a/scripts/evict-inactive.js +++ b/scripts/evict-inactive.js @@ -15,8 +15,6 @@ var inactiveTime = +new Date() - (config.inactiveTime * 24 * 3600 * 1000); // files which were archived before this date can be considered safe to remove var retentionTime = +new Date() - (config.archiveRetentionTime * 24 * 3600 * 
1000); -var retainData = Boolean(config.retainData); - var getNewestTime = function (stats) { return stats[['atime', 'ctime', 'mtime'].reduce(function (a, b) { return stats[b] > stats[a]? b: a; @@ -176,23 +174,6 @@ nThen(function (w) { if (pins[item.blobId]) { return void next(); } if (item && getNewestTime(item) > retentionTime) { return void next(); } - if (!retainData) { - return void blobs.remove.blob(item.blobId, function (err) { - if (err) { - Log.error("EVICT_BLOB_ERROR", { - error: err, - item: item, - }); - return void next(); - } - Log.info("EVICT_BLOB_INACTIVE", { - item: item, - }); - removed++; - next(); - }); - } - blobs.archive.blob(item.blobId, function (err) { if (err) { Log.error("EVICT_ARCHIVE_BLOB_ERROR", { @@ -247,7 +228,6 @@ nThen(function (w) { Log.info("EVICT_BLOB_PROOFS_REMOVED", removed); })); }).nThen(function (w) { - var removed = 0; var channels = 0; var archived = 0; @@ -279,42 +259,22 @@ nThen(function (w) { // ignore the channel if it's pinned if (pins[item.channel]) { return void cb(); } - // if the server is configured to retain data, archive the channel - if (config.retainData) { - return void store.archiveChannel(item.channel, w(function (err) { - if (err) { - Log.error('EVICT_CHANNEL_ARCHIVAL_ERROR', { - error: err, - channel: item.channel, - }); - return void cb(); - } - Log.info('EVICT_CHANNEL_ARCHIVAL', item.channel); - archived++; - cb(); - })); - } - - // otherwise remove it - store.removeChannel(item.channel, w(function (err) { + return void store.archiveChannel(item.channel, w(function (err) { if (err) { - Log.error('EVICT_CHANNEL_REMOVAL_ERROR', { + Log.error('EVICT_CHANNEL_ARCHIVAL_ERROR', { error: err, channel: item.channel, }); return void cb(); } - Log.info('EVICT_CHANNEL_REMOVAL', item.channel); - removed++; + Log.info('EVICT_CHANNEL_ARCHIVAL', item.channel); + archived++; cb(); })); }; var done = function () { - if (config.retainData) { - return void Log.info('EVICT_CHANNELS_ARCHIVED', archived); - } - return void Log.info('EVICT_CHANNELS_REMOVED', removed); + return void Log.info('EVICT_CHANNELS_ARCHIVED', archived); }; store.listChannels(handler, w(done)); diff --git a/scripts/tests/test-rpc.js b/scripts/tests/test-rpc.js index 70ea13053..07f30bc46 100644 --- a/scripts/tests/test-rpc.js +++ b/scripts/tests/test-rpc.js @@ -159,6 +159,13 @@ var createUser = function (config, cb) { } wc.leave(); })); + }).nThen(function (w) { + // FIXME give the server time to write your mailbox data before checking that it's correct + // chainpad-server sends an ACK before the channel has actually been created + // causing you to think that everything is good. + // without this timeout the GET_METADATA rpc occasionally returns before + // the metadata has actually been written to the disk. + setTimeout(w(), 500); }).nThen(function (w) { // confirm that you own your mailbox user.anonRpc.send("GET_METADATA", user.mailboxChannel, w(function (err, data) { @@ -227,6 +234,18 @@ var createUser = function (config, cb) { return void cb(err); } })); + }).nThen(function (w) { + // some basic sanity checks... 
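// NOTE: the fixed 500ms sleep added above is a workaround rather than a
// guarantee; a sketch of a more robust alternative would be to poll until
// the server answers correctly ('untilOk' and 'attempt' are hypothetical):
//
//     var untilOk = function (attempt, cb) {
//         attempt(function (ok) {
//             if (ok) { return void cb(); }
//             setTimeout(function () { untilOk(attempt, cb); }, 50);
//         });
//     };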
+        user.rpc.getServerHash(w(function (err, hash) {
+            if (err) {
+                w.abort();
+                return void cb(err);
+            }
+            if (hash !== EMPTY_ARRAY_HASH) {
+                console.error("EXPECTED EMPTY ARRAY HASH");
+                process.exit(1);
+            }
+        }));
     }).nThen(function () {
         user.cleanup = function (cb) {
diff --git a/scripts/tests/test-scheduler.js b/scripts/tests/test-scheduler.js
new file mode 100644
index 000000000..6a076d5aa
--- /dev/null
+++ b/scripts/tests/test-scheduler.js
@@ -0,0 +1,220 @@
+/* three types of actions:
+    * read
+    * write
+    * append
+   each of which takes a random amount of time
+
+*/
+var Util = require("../../lib/common-util");
+var schedule = require("../../lib/schedule")();
+var nThen = require("nthen");
+
+var rand = function (n) {
+    return Math.floor(Math.random() * n);
+};
+
+var rand_time = function () {
+    // between 25 and 324
+    return rand(300) + 25;
+};
+
+var makeAction = function (type) {
+    var i = 0;
+    return function (time) {
+        var j = i++;
+        return function (next) {
+            console.log("  Beginning action: %s#%s", type, j);
+            setTimeout(function () {
+                console.log("  Completed action: %s#%s", type, j);
+                next();
+            }, time);
+            return j;
+        };
+    };
+};
+
+var TYPES = ['WRITE', 'READ', 'APPEND'];
+var chooseAction = function () {
+    var n = rand(100);
+
+    if (n < 50) { return 'APPEND'; }
+    if (n < 90) { return 'READ'; }
+    return 'WRITE';
+
+    //return TYPES[rand(3)];
+};
+
+var test = function (script, cb) {
+    var uid = Util.uid();
+
+    var TO_RUN = script.length;
+    var total_run = 0;
+
+    var parallel = 0;
+    var last_run_ordered = -1;
+    //var i = 0;
+
+    var ACTIONS = {};
+    TYPES.forEach(function (type) {
+        ACTIONS[type] = makeAction(type);
+    });
+
+    nThen(function (w) {
+        setTimeout(w(), 3000);
+        // run scripted actions with assertions
+        script.forEach(function (scene) {
+            var type = scene[0];
+            var time = typeof(scene[1]) === 'number'?
scene[1]: rand_time(); + + var action = ACTIONS[type](time); + console.log("Queuing action of type: %s(%s)", type, time); + + var proceed = w(); + + switch (type) { + case 'APPEND': + return schedule.ordered(uid, w(function (next) { + parallel++; + var temp = action(function () { + parallel--; + total_run++; + proceed(); + next(); + }); + if (temp !== (last_run_ordered + 1)) { + throw new Error("out of order"); + } + last_run_ordered = temp; + })); + case 'WRITE': + return schedule.blocking(uid, w(function (next) { + parallel++; + action(function () { + parallel--; + total_run++; + proceed(); + next(); + }); + if (parallel > 1) { + console.log("parallelism === %s", parallel); + throw new Error("too much parallel"); + } + })); + case 'READ': + return schedule.unordered(uid, w(function (next) { + parallel++; + action(function () { + parallel--; + total_run++; + proceed(); + next(); + }); + })); + default: + throw new Error("wut"); + } + }); + }).nThen(function () { + // make assertions about the whole script + if (total_run !== TO_RUN) { + console.log("Ran %s / %s", total_run, TO_RUN); + throw new Error("skipped tasks"); + } + console.log("total_run === %s", total_run); + + cb(); + }); +}; + + +var randomScript = function () { + var len = rand(15) + 10; + var script = []; + while (len--) { + script.push([ + chooseAction(), + rand_time(), + ]); + } + return script; +}; + +var WRITE = function (t) { + return ['WRITE', t]; +}; +var READ = function (t) { + return ['READ', t]; +}; + +var APPEND = function (t) { + return ['APPEND', t]; +}; + +nThen(function (w) { + test([ + ['READ', 150], + ['APPEND', 200], + ['APPEND', 100], + ['READ', 350], + ['WRITE', 400], + ['APPEND', 275], + ['APPEND', 187], + ['WRITE', 330], + ['WRITE', 264], + ['WRITE', 256], + ], w(function () { + console.log("finished pre-scripted test\n"); + })); +}).nThen(function (w) { + test([ + WRITE(289), + APPEND(281), + READ(207), + WRITE(225), + READ(279), + WRITE(300), + READ(331), + APPEND(341), + APPEND(385), + READ(313), + WRITE(285), + READ(304), + APPEND(273), + APPEND(150), + WRITE(246), + READ(244), + WRITE(172), + APPEND(253), + READ(215), + READ(296), + APPEND(281), + APPEND(296), + WRITE(168), + ], w(function () { + console.log("finished 2nd pre-scripted test\n"); + })); +}).nThen(function () { + var totalTests = 50; + var randomTests = 1; + + var last = nThen(function () { + console.log("beginning randomized tests"); + }); + + var queueRandomTest = function (i) { + last = last.nThen(function (w) { + console.log("running random test script #%s\n", i); + test(randomScript(), w(function () { + console.log("finished random test #%s\n", i); + })); + }); + }; + + while (randomTests <=totalTests) { queueRandomTest(randomTests++); } + + last.nThen(function () { + console.log("finished %s random tests", totalTests); + }); +}); + + diff --git a/server.js b/server.js index 399eb1442..70479d7ee 100644 --- a/server.js +++ b/server.js @@ -4,17 +4,12 @@ var Express = require('express'); var Http = require('http'); var Fs = require('fs'); -var WebSocketServer = require('ws').Server; -var NetfluxSrv = require('chainpad-server/NetfluxWebsocketSrv'); var Package = require('./package.json'); var Path = require("path"); var nThen = require("nthen"); var config = require("./lib/load-config"); -// support multiple storage back ends -var Storage = require('./storage/file'); - var app = Express(); // mode can be FRESH (default), DEV, or PACKAGE @@ -69,11 +64,9 @@ var setHeaders = (function () { if (Object.keys(headers).length) { return 
function (req, res) { const h = [ - /^\/pad(2)?\/inner\.html.*/, + /^\/pad\/inner\.html.*/, /^\/common\/onlyoffice\/.*\/index\.html.*/, - /^\/sheet\/inner\.html.*/, - /^\/ooslide\/inner\.html.*/, - /^\/oodoc\/inner\.html.*/, + /^\/(sheet|ooslide|oodoc)\/inner\.html.*/, ].some((regex) => { return regex.test(req.url) }) ? padHeaders : headers; @@ -117,11 +110,6 @@ app.use(function (req, res, next) { app.use(Express.static(__dirname + '/www')); -Fs.exists(__dirname + "/customize", function (e) { - if (e) { return; } - console.log("Cryptpad is customizable, see customize.dist/readme.md for details"); -}); - // FIXME I think this is a regression caused by a recent PR // correct this hack without breaking the contributor's intended behaviour. @@ -207,81 +195,36 @@ app.use(function (req, res, next) { var httpServer = Http.createServer(app); -httpServer.listen(config.httpPort,config.httpAddress,function(){ - var host = config.httpAddress; - var hostName = !host.indexOf(':') ? '[' + host + ']' : host; - - var port = config.httpPort; - var ps = port === 80? '': ':' + port; - - console.log('[%s] server available http://%s%s', new Date().toISOString(), hostName, ps); -}); -if (config.httpSafePort) { - Http.createServer(app).listen(config.httpSafePort, config.httpAddress); -} - -var wsConfig = { server: httpServer }; +nThen(function (w) { + Fs.exists(__dirname + "/customize", w(function (e) { + if (e) { return; } + console.log("Cryptpad is customizable, see customize.dist/readme.md for details"); + })); +}).nThen(function (w) { + httpServer.listen(config.httpPort,config.httpAddress,function(){ + var host = config.httpAddress; + var hostName = !host.indexOf(':') ? '[' + host + ']' : host; -var rpc; -var historyKeeper; + var port = config.httpPort; + var ps = port === 80? '': ':' + port; -var log; + console.log('[%s] server available http://%s%s', new Date().toISOString(), hostName, ps); + }); -// Initialize logging, the the store, then tasks, then rpc, then history keeper and then start the server -var nt = nThen(function (w) { - // set up logger - var Logger = require("./lib/log"); - //console.log("Loading logging module"); - Logger.create(config, w(function (_log) { - log = config.log = _log; - })); -}).nThen(function (w) { - if (config.externalWebsocketURL) { - // if you plan to use an external websocket server - // then you don't need to load any API services other than the logger. - // Just abort. - w.abort(); - return; + if (config.httpSafePort) { + Http.createServer(app).listen(config.httpSafePort, config.httpAddress, w()); } - Storage.create(config, w(function (_store) { - config.store = _store; - })); -}).nThen(function (w) { - var Tasks = require("./storage/tasks"); - Tasks.create(config, w(function (e, tasks) { - if (e) { - throw e; - } - config.tasks = tasks; - if (config.disableIntegratedTasks) { return; } - setInterval(function () { - tasks.runAll(function (err) { - if (err) { - // either TASK_CONCURRENCY or an error with tasks.list - // in either case it is already logged. 
- } - }); - }, 1000 * 60 * 5); // run every five minutes - })); -}).nThen(function (w) { - require("./rpc").create(config, w(function (e, _rpc) { - if (e) { - w.abort(); - throw e; - } - rpc = _rpc; - })); }).nThen(function () { - var HK = require('./historyKeeper.js'); - var hkConfig = { - tasks: config.tasks, - rpc: rpc, - store: config.store, - log: log, - retainData: Boolean(config.retainData), - }; - historyKeeper = HK.create(hkConfig); -}).nThen(function () { - var wsSrv = new WebSocketServer(wsConfig); - NetfluxSrv.run(wsSrv, config, historyKeeper); + var wsConfig = { server: httpServer }; + + // Initialize logging then start the API server + require("./lib/log").create(config, function (_log) { + config.log = _log; + config.httpServer = httpServer; + + if (config.externalWebsocketURL) { return; } + require("./lib/api").create(config); + }); }); + + diff --git a/storage/file.js b/storage/file.js index bb65cff43..cb29d7fab 100644 --- a/storage/file.js +++ b/storage/file.js @@ -7,6 +7,10 @@ var Path = require("path"); var nThen = require("nthen"); var Semaphore = require("saferphore"); var Util = require("../lib/common-util"); +var Meta = require("../lib/metadata"); +var Extras = require("../lib/hk-util"); + +const Schedule = require("../lib/schedule"); const Readline = require("readline"); const ToPull = require('stream-to-pull-stream'); const Pull = require('pull-stream'); @@ -37,6 +41,10 @@ var mkArchiveMetadataPath = function (env, channelId) { return Path.join(env.archiveRoot, 'datastore', channelId.slice(0, 2), channelId) + '.metadata.ndjson'; }; +var mkTempPath = function (env, channelId) { + return mkPath(env, channelId) + '.temp'; +}; + // pass in the path so we can reuse the same function for archived files var channelExists = function (filepath, cb) { Fs.stat(filepath, function (err, stat) { @@ -553,9 +561,6 @@ var listChannels = function (root, handler, cb) { // to an equivalent location in the cold storage directory var archiveChannel = function (env, channelName, cb) { // TODO close channels before archiving them? - if (!env.retainData) { - return void cb("ARCHIVES_DISABLED"); - } // ctime is the most reliable indicator of when a file was archived // because it is used to indicate changes to the files metadata @@ -752,6 +757,8 @@ var getChannel = function ( } if (env.openFiles >= env.openFileLimit) { + // FIXME warn if this is the case? 
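// NOTE: regarding the graceful-fs suggestion in this FIXME: graceful-fs is
// a drop-in replacement for the fs module which queues open() calls when
// the process hits EMFILE instead of failing, so adopting it would be a
// one-line change:
//
//     var Fs = require("graceful-fs"); // instead of require("fs")
//
// it would not remove the need for the expiry pass below, but it would turn
// hard failures under heavy concurrent reads into delayed ones.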
+ // alternatively use graceful-fs to handle lots of concurrent reads // if you're running out of open files, asynchronously clean up expired files // do it on a shorter timeframe, though (half of normal) setTimeout(function () { @@ -867,40 +874,187 @@ var getMessages = function (env, chanName, handler, cb) { }); }; -/*:: -export type ChainPadServer_MessageObj_t = { buff: Buffer, offset: number }; -export type ChainPadServer_Storage_t = { - readMessagesBin: ( - channelName:string, - start:number, - asyncMsgHandler:(msg:ChainPadServer_MessageObj_t, moreCb:()=>void, abortCb:()=>void)=>void, - cb:(err:?Error)=>void - )=>void, - message: (channelName:string, content:string, cb:(err:?Error)=>void)=>void, - messageBin: (channelName:string, content:Buffer, cb:(err:?Error)=>void)=>void, - getMessages: (channelName:string, msgHandler:(msg:string)=>void, cb:(err:?Error)=>void)=>void, - removeChannel: (channelName:string, cb:(err:?Error)=>void)=>void, - closeChannel: (channelName:string, cb:(err:?Error)=>void)=>void, - flushUnusedChannels: (cb:()=>void)=>void, - getChannelSize: (channelName:string, cb:(err:?Error, size:?number)=>void)=>void, - getChannelMetadata: (channelName:string, cb:(err:?Error|string, data:?any)=>void)=>void, - clearChannel: (channelName:string, (err:?Error)=>void)=>void -}; -export type ChainPadServer_Config_t = { - verbose?: boolean, - filePath?: string, - channelExpirationMs?: number, - openFileLimit?: number +var trimChannel = function (env, channelName, hash, _cb) { + var cb = Util.once(Util.mkAsync(_cb)); + // this function is queued as a blocking action for the relevant channel + + // derive temporary file paths for metadata and log buffers + var tempChannelPath = mkTempPath(env, channelName); + + // derive production db paths + var channelPath = mkPath(env, channelName); + var metadataPath = mkMetadataPath(env, channelName); + + // derive archive paths + var archiveChannelPath = mkArchivePath(env, channelName); + var archiveMetadataPath = mkArchiveMetadataPath(env, channelName); + + var metadataReference = {}; + + var tempStream; + var ABORT; + + var cleanUp = function (cb) { + if (tempStream && !tempStream.closed) { + try { + tempStream.close(); + } catch (err) { } + } + + Fse.unlink(tempChannelPath, function (err) { + // proceed if deleted or if there was nothing to delete + if (!err || err.code === 'ENOENT') { return cb(); } + // else abort and call back with the error + cb(err); + }); + }; + + nThen(function (w) { + // close the file descriptor if it is open + closeChannel(env, channelName, w(function (err) { + if (err) { + w.abort(); + return void cb(err); + } + })); + }).nThen(function (w) { + cleanUp(w(function (err) { + if (err) { + w.abort(); + cb(err); + } + })); + }).nThen(function (w) { + // eat errors since loading the logger here would create a cyclical dependency + var lineHandler = Meta.createLineHandler(metadataReference, Util.noop); + + readMetadata(env, channelName, lineHandler, w(function (err) { + if (err) { + w.abort(); + return void cb(err); + } + // if there were no errors just fall through to the next block + })); + }).nThen(function (w) { + // create temp buffer writeStream + tempStream = Fs.createWriteStream(tempChannelPath, { + flags: 'a', + }); + tempStream.on('open', w()); + tempStream.on('error', function (err) { + w.abort(); + ABORT = true; + cleanUp(function () { + cb(err); + }); + }); + }).nThen(function (w) { + var i = 0; + var retain = false; + + var handler = function (msgObj, readMore, abort) { + if (ABORT) { return void abort(); } + 
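// NOTE: each msgObj the handler below receives is one log line. the first
// line may be a metadata object ('{...}'), and every later line is a
// message array whose fifth element (msg[4]) carries the payload, e.g.
// (contents hypothetical):
//
//     [0, null, "MSG", "<channelId>", "<encrypted payload>"]
//
// Extras.getHash(msg[4]) derives the hash clients use to reference a
// message, which is what the supplied 'hash' argument is compared against.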
// the first message might be metadata... ignore it if so
+            if (i++ === 0 && msgObj.buff.indexOf('{') === 0) {
+                return readMore();
+            }
+
+            var s_msg = msgObj.buff.toString('utf8');
+            if (retain) {
+                // if this flag is set then you've already found
+                // the message you were looking for.
+                // write it to your temp buffer and keep going
+                return void tempStream.write(s_msg + '\n', function () {
+                    readMore();
+                });
+            }
+
+            var msg = Util.tryParse(s_msg);
+            var msgHash = Extras.getHash(msg[4]);
+
+            if (msgHash === hash) {
+                // everything from this point on should be retained
+                retain = true;
+                return void tempStream.write(msgObj.buff, function () {
+                    readMore();
+                });
+            }
+        };
+
+        readMessagesBin(env, channelName, 0, handler, w(function (err) {
+            if (err) {
+                w.abort();
+                return void cleanUp(function () {
+                    // intentionally call back with main error
+                    // not the cleanup error
+                    cb(err);
+                });
+            }
+
+            if (!retain) {
+                // you never found the message you were looking for
+                // this whole operation is invalid...
+                // clean up, abort, and call back with an error
+
+                w.abort();
+                cleanUp(function () {
+                    // intentionally call back with main error
+                    // not the cleanup error
+                    cb('HASH_NOT_FOUND');
+                });
+            }
+        }));
+    }).nThen(function (w) {
+        // copy existing channel to the archive
+        Fse.copy(channelPath, archiveChannelPath, w(function (err) {
+            if (!err || err.code === 'ENOENT') { return; }
+            w.abort();
+            cleanUp(function () {
+                cb(err);
+            });
+        }));
+
+        // copy existing metadata to the archive
+        Fse.copy(metadataPath, archiveMetadataPath, w(function (err) {
+            if (!err || err.code === 'ENOENT') { return; }
+            w.abort();
+            cleanUp(function () {
+                cb(err);
+            });
+        }));
+    }).nThen(function (w) {
+        // overwrite the existing metadata log with the current metadata state
+        Fs.writeFile(metadataPath, JSON.stringify(metadataReference.meta) + '\n', w(function (err) {
+            // this shouldn't happen, but if it does your channel might be messed up :(
+            if (err) {
+                w.abort();
+                cb(err);
+            }
+        }));
+
+        // overwrite the existing channel with the temp log
+        Fse.move(tempChannelPath, channelPath, {
+            overwrite: true,
+        }, w(function (err) {
+            // this shouldn't happen, but if it does your channel might be messed up :(
+            if (err) {
+                w.abort();
+                cb(err);
+            }
+        }));
+    }).nThen(function () {
+        // clean up and call back with no error
+        // triggering a historyKeeper index cache eviction...
+        cleanUp(function () {
+            cb();
+        });
+    });
 };
-*/
-module.exports.create = function (
-    conf /*:ChainPadServer_Config_t*/,
-    cb /*:(store:ChainPadServer_Storage_t)=>void*/
-) {
+
+module.exports.create = function (conf, cb) {
     var env = {
         root: conf.filePath || './datastore',
         archiveRoot: conf.archivePath || './data/archive',
-        retainData: conf.retainData,
         channels: { },
         channelExpirationMs: conf.channelExpirationMs || 30000,
         verbose: conf.verbose,
@@ -909,6 +1063,24 @@ module.exports.create = function (
     };
     var it;
 
+    /* our scheduler prioritizes and executes tasks with respect
+       to all other tasks invoked with an identical key
+       (typically the id of the concerned channel)
+
+       it assumes that all tasks can be categorized into three types
+
+       1. unordered tasks such as streaming reads which can take
+          a long time to complete.
+
+       2. ordered tasks such as appending to a file which does not
+          take very long, but where priority is important.
+
+       3. blocking tasks such as rewriting a file where it would be
+          dangerous to perform any other task concurrently.
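       as an illustration (a sketch, not part of this patch), the three
       cases map onto the scheduler API as it is used throughout this file:

           schedule.unordered('chanA', function (next) {
               // long streaming read; other 'chanA' tasks may interleave
               next();
           });
           schedule.ordered('chanA', function (next) {
               // quick append; runs in submission order for 'chanA'
               next();
           });
           schedule.blocking('chanA', function (next) {
               // exclusive rewrite; nothing else for 'chanA' runs until next()
               next();
           });

       tasks scheduled under different keys never wait on each other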
+ + */ + var schedule = env.schedule = Schedule(); + nThen(function (w) { // make sure the store's directory exists Fse.mkdirp(env.root, PERMISSIVE, w(function (err) { @@ -928,43 +1100,80 @@ module.exports.create = function ( // write a new message to a log message: function (channelName, content, cb) { if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); } - message(env, channelName, content, cb); + schedule.ordered(channelName, function (next) { + message(env, channelName, content, Util.both(cb, next)); + }); }, // iterate over all the messages in a log getMessages: function (channelName, msgHandler, cb) { if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); } - getMessages(env, channelName, msgHandler, cb); + schedule.unordered(channelName, function (next) { + getMessages(env, channelName, msgHandler, Util.both(cb, next)); + }); }, // NEWER IMPLEMENTATIONS OF THE SAME THING // write a new message to a log messageBin: (channelName, content, cb) => { if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); } - messageBin(env, channelName, content, cb); + schedule.ordered(channelName, function (next) { + messageBin(env, channelName, content, Util.both(cb, next)); + }); }, // iterate over the messages in a log readMessagesBin: (channelName, start, asyncMsgHandler, cb) => { if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); } - readMessagesBin(env, channelName, start, asyncMsgHandler, cb); +// FIXME there is a race condition here +// historyKeeper reads the file to find the byte offset of the first interesting message +// then calls this function again to read from that point. +// If this task is in the queue already when the file is read again +// then that byte offset will have been invalidated +// and the resulting stream probably won't align with message boundaries. +// We can evict the cache in the callback but by that point it will be too late. +// Presumably we'll need to bury some of historyKeeper's logic into a filestore method +// in order to make index/read sequences atomic. +// Otherwise, we can add a new task type to the scheduler to take invalidation into account... +// either method introduces significant complexity. + schedule.unordered(channelName, function (next) { + readMessagesBin(env, channelName, start, asyncMsgHandler, Util.both(cb, next)); + }); }, // METHODS for deleting data // remove a channel and its associated metadata log if present removeChannel: function (channelName, cb) { if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); } - removeChannel(env, channelName, function (err) { - cb(err); +// FIXME there's another race condition here... +// when a remove and an append are scheduled in that order +// the remove will delete the channel's metadata (including its validateKey) +// then the append will recreate the channel and insert a message. +// clients that are connected to the channel via historyKeeper should be kicked out +// however, anyone that connects to that channel in the future will be able to read the +// signed message, but will not find its validate key... 
+// resulting in a junk/unusable document + schedule.ordered(channelName, function (next) { + removeChannel(env, channelName, Util.both(cb, next)); }); }, // remove a channel and its associated metadata log from the archive directory removeArchivedChannel: function (channelName, cb) { if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); } - removeArchivedChannel(env, channelName, cb); + schedule.ordered(channelName, function (next) { + removeArchivedChannel(env, channelName, Util.both(cb, next)); + }); }, // clear all data for a channel but preserve its metadata clearChannel: function (channelName, cb) { if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); } - clearChannel(env, channelName, cb); + schedule.ordered(channelName, function (next) { + clearChannel(env, channelName, Util.both(cb, next)); + }); + }, + trimChannel: function (channelName, hash, cb) { + if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); } + schedule.blocking(channelName, function (next) { + trimChannel(env, channelName, hash, Util.both(cb, next)); + }); }, // check if a channel exists in the database @@ -972,47 +1181,85 @@ module.exports.create = function ( if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); } // construct the path var filepath = mkPath(env, channelName); - channelExists(filepath, cb); +// (ansuz) I'm uncertain whether this task should be unordered or ordered. +// there's a round trip to the client (and possibly the user) before they decide +// to act on the information of whether there is already content present in this channel. +// so it's practically impossible to avoid race conditions where someone else creates +// some content before you. +// if that's the case, it's basically impossible that you'd generate the same signing key, +// and thus historykeeper should reject the signed messages of whoever loses the race. +// thus 'unordered' seems appropriate. + schedule.unordered(channelName, function (next) { + channelExists(filepath, Util.both(cb, next)); + }); }, // check if a channel exists in the archive isChannelArchived: function (channelName, cb) { if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); } // construct the path var filepath = mkArchivePath(env, channelName); - channelExists(filepath, cb); +// as with the method above, somebody might remove, restore, or overwrite an archive +// in the time that it takes to answer this query and to execute whatever follows. +// since it's impossible to win the race every time let's just make this 'unordered' + schedule.unordered(channelName, function (next) { + channelExists(filepath, Util.both(cb, next)); + }); }, // move a channel from the database to the archive, along with its metadata archiveChannel: function (channelName, cb) { if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); } - archiveChannel(env, channelName, cb); +// again, the semantics around archiving and appending are really muddy. +// so I'm calling this 'unordered' again + schedule.unordered(channelName, function (next) { + archiveChannel(env, channelName, Util.both(cb, next)); + }); }, // restore a channel from the archive to the database, along with its metadata restoreArchivedChannel: function (channelName, cb) { if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); } - unarchiveChannel(env, channelName, cb); +// archive restoration will fail if either a file or its metadata exists in the live db. 
+// so I'm calling this 'ordered' to give writes a chance to flush out.
+// accidental conflicts are extremely unlikely since clients check the status
+// of a previously known channel before joining.
+            schedule.ordered(channelName, function (next) {
+                unarchiveChannel(env, channelName, Util.both(cb, next));
+            });
         },
 
         // METADATA METHODS
 
         // fetch the metadata for a channel
         getChannelMetadata: function (channelName, cb) {
             if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
-            getChannelMetadata(env, channelName, cb);
+// The only things that can invalidate this method's results are channel archival, removal, or trimming.
+// We want it to be fast, so let's make it unordered.
+            schedule.unordered(channelName, function (next) {
+                getChannelMetadata(env, channelName, Util.both(cb, next));
+            });
         },
         // iterate over lines of metadata changes from a dedicated log
         readDedicatedMetadata: function (channelName, handler, cb) {
             if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
-            getDedicatedMetadata(env, channelName, handler, cb);
+// Everything that modifies metadata also updates clients, so this can be 'unordered'
+            schedule.unordered(channelName, function (next) {
+                getDedicatedMetadata(env, channelName, handler, Util.both(cb, next));
+            });
         },
 
         // iterate over multiple lines of metadata changes
         readChannelMetadata: function (channelName, handler, cb) {
             if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
-            readMetadata(env, channelName, handler, cb);
+// same logic as 'readDedicatedMetadata'
+            schedule.unordered(channelName, function (next) {
+                readMetadata(env, channelName, handler, Util.both(cb, next));
+            });
         },
         // write a new line to a metadata log
         writeMetadata: function (channelName, data, cb) {
             if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
-            writeMetadata(env, channelName, data, cb);
+// metadata writes are fast and should be applied in order
+            schedule.ordered(channelName, function (next) {
+                writeMetadata(env, channelName, data, Util.both(cb, next));
+            });
         },
 
         // CHANNEL ITERATION
@@ -1025,13 +1272,22 @@ module.exports.create = function (
         getChannelSize: function (channelName, cb) {
             if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
-            channelBytes(env, channelName, cb);
+// this method should be really fast and it probably doesn't matter much
+// if we get the size slightly before or after somebody writes a few hundred bytes to it.
+            schedule.ordered(channelName, function (next) {
+                channelBytes(env, channelName, Util.both(cb, next));
+            });
         },
 
         // OTHER DATABASE FUNCTIONALITY
         // remove a particular channel from the cache
        closeChannel: function (channelName, cb) {
             if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
-            closeChannel(env, channelName, cb);
+// It is most likely the case that the channel is inactive if we are trying to close it,
+// thus it doesn't make much difference whether it's ordered or not.
+// In any case, it will be re-opened if anyone tries to write to it.
+            schedule.ordered(channelName, function (next) {
+                closeChannel(env, channelName, Util.both(cb, next));
+            });
         },
         // iterate over open channels and close any that are not active
         flushUnusedChannels: function (cb) {
@@ -1039,7 +1295,10 @@ module.exports.create = function (
         },
         // write to a log file
         log: function (channelName, content, cb) {
-            message(env, channelName, content, cb);
+// you probably want the events in your log to be in the correct order.
 
     // CHANNEL ITERATION
@@ -1025,13 +1272,22 @@ module.exports.create = function (
     getChannelSize: function (channelName, cb) {
         if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
-        channelBytes(env, channelName, cb);
+// this method should be really fast and it probably doesn't matter much
+// if we get the size slightly before or after somebody writes a few hundred bytes to it.
+        schedule.ordered(channelName, function (next) {
+            channelBytes(env, channelName, Util.both(cb, next));
+        });
     },
 
     // OTHER DATABASE FUNCTIONALITY
     // remove a particular channel from the cache
     closeChannel: function (channelName, cb) {
         if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
-        closeChannel(env, channelName, cb);
+// It is most likely the case that the channel is inactive if we are trying to close it,
+// thus it doesn't make much difference whether it's ordered or not.
+// In any case, it will be re-opened if anyone tries to write to it.
+        schedule.ordered(channelName, function (next) {
+            closeChannel(env, channelName, Util.both(cb, next));
+        });
     },
     // iterate over open channels and close any that are not active
     flushUnusedChannels: function (cb) {
@@ -1039,7 +1295,10 @@ module.exports.create = function (
     },
     // write to a log file
     log: function (channelName, content, cb) {
-        message(env, channelName, content, cb);
+// you probably want the events in your log to be in the correct order.
+        schedule.ordered(channelName, function (next) {
+            message(env, channelName, content, Util.both(cb, next));
+        });
     },
     // shut down the database
     shutdown: function () {
diff --git a/storage/tasks.js b/storage/tasks.js
index 2209b3d59..bb4dbdb9c 100644
--- a/storage/tasks.js
+++ b/storage/tasks.js
@@ -202,22 +202,6 @@ var expire = function (env, task, cb) {
     var Log = env.log;
     var args = task.slice(2);
 
-    if (!env.retainData) {
-        Log.info('DELETION_SCHEDULED_EXPIRATION', {
-            task: task,
-        });
-        env.store.removeChannel(args[0], function (err) {
-            if (err) {
-                Log.error('DELETION_SCHEDULED_EXPIRATION_ERROR', {
-                    task: task,
-                    error: err,
-                });
-            }
-            cb();
-        });
-        return;
-    }
-
     Log.info('ARCHIVAL_SCHEDULED_EXPIRATION', {
         task: task,
     });
@@ -381,7 +365,6 @@ Tasks.create = function (config, cb) {
         root: config.taskPath || './tasks',
         log: config.log,
         store: config.store,
-        retainData: Boolean(config.retainData),
     };
 
     // make sure the path exists...
diff --git a/www/admin/app-admin.less b/www/admin/app-admin.less
index 10e178308..84532c6c5 100644
--- a/www/admin/app-admin.less
+++ b/www/admin/app-admin.less
@@ -23,5 +23,29 @@
         display: flex;
         flex-flow: column;
     }
+
+    .cp-support-list-actions {
+        margin: 10px 0px 10px 2px;
+    }
+
+    .cp-support-list-ticket:not(.cp-support-list-closed) {
+        .cp-support-list-message {
+            &:last-child:not(.cp-support-fromadmin) {
+                color: @colortheme_cp-red;
+                background-color: lighten(@colortheme_cp-red, 25%);
+                .cp-support-showdata {
+                    background-color: lighten(@colortheme_cp-red, 30%);
+                }
+            }
+        }
+    }
+
+    .cp-support-fromadmin {
+        color: @colortheme_logo-2;
+        background-color: #FFF;
+        .cp-support-message-content {
+            color: @colortheme_logo-2;
+        }
+    }
 }
diff --git a/www/common/common-hash.js b/www/common/common-hash.js
index 85ec3b36e..b60ab3306 100644
--- a/www/common/common-hash.js
+++ b/www/common/common-hash.js
@@ -60,6 +60,23 @@ var factory = function (Util, Crypto, Nacl) {
             return '/2/' + secret.type + '/view/' + Crypto.b64RemoveSlashes(data.viewKeyStr) + '/' + pass;
         }
     };
+
+    Hash.getHiddenHashFromKeys = function (type, secret, opts) {
+        opts = opts || {};
+        var canEdit = (secret.keys && secret.keys.editKeyStr) || secret.key;
+        var mode = (!opts.view && canEdit) ? 'edit/' : 'view/';
+        var pass = secret.password ? 'p/' : '';
+
+        if (secret.keys && secret.keys.fileKeyStr) { mode = ''; }
+
+        var hash = '/3/' + type + '/' + mode + secret.channel + '/' + pass;
+        var hashData = Hash.parseTypeHash(type, hash);
+        if (hashData && hashData.getHash) {
+            return hashData.getHash(opts || {});
+        }
+        return hash;
+    };
+
     var getFileHashFromKeys = Hash.getFileHashFromKeys = function (secret) {
         var version = secret.version;
         var data = secret.keys;
@@ -160,12 +177,28 @@ Version 1
     };
     var parseTypeHash = Hash.parseTypeHash = function (type, hash) {
         if (!hash) { return; }
-        var options;
+        var options = [];
         var parsed = {};
         var hashArr = fixDuplicateSlashes(hash).split('/');
+
+        var addOptions = function () {
+            parsed.password = options.indexOf('p') !== -1;
+            parsed.present = options.indexOf('present') !== -1;
+            parsed.embed = options.indexOf('embed') !== -1;
+            parsed.ownerKey = getOwnerKey(options);
+        };
+
         if (['media', 'file', 'user', 'invite'].indexOf(type) === -1) {
             parsed.type = 'pad';
             parsed.getHash = function () { return hash; };
+            parsed.getOptions = function () {
+                return {
+                    embed: parsed.embed,
+                    present: parsed.present,
+                    ownerKey: parsed.ownerKey,
+                    password: parsed.password
+                };
+            };
             if (hash.slice(0,1) !== '/' && hash.length >= 56) { // Version 0
                 // Old hash
                 parsed.channel = hash.slice(0, 32);
@@ -173,6 +206,18 @@ Version 1
                 parsed.version = 0;
                 return parsed;
             }
+
+            // Version >= 1: more hash options
+            parsed.getHash = function (opts) {
+                var hash = hashArr.slice(0, 5).join('/') + '/';
+                var owner = typeof(opts.ownerKey) !== "undefined" ? opts.ownerKey : parsed.ownerKey;
+                if (owner) { hash += owner + '/'; }
+                if (parsed.password || opts.password) { hash += 'p/'; }
+                if (opts.embed) { hash += 'embed/'; }
+                if (opts.present) { hash += 'present/'; }
+                return hash;
+            };
+
             if (hashArr[1] && hashArr[1] === '1') { // Version 1
                 parsed.version = 1;
                 parsed.mode = hashArr[2];
@@ -180,18 +225,8 @@ Version 1
                 parsed.key = Crypto.b64AddSlashes(hashArr[4]);
 
                 options = hashArr.slice(5);
-                parsed.present = options.indexOf('present') !== -1;
-                parsed.embed = options.indexOf('embed') !== -1;
-                parsed.ownerKey = getOwnerKey(options);
-
-                parsed.getHash = function (opts) {
-                    var hash = hashArr.slice(0, 5).join('/') + '/';
-                    var owner = typeof(opts.ownerKey) !== "undefined" ? opts.ownerKey : parsed.ownerKey;
-                    if (owner) { hash += owner + '/'; }
-                    if (opts.embed) { hash += 'embed/'; }
-                    if (opts.present) { hash += 'present/'; }
-                    return hash;
-                };
+                addOptions();
+
                 return parsed;
             }
             if (hashArr[1] && hashArr[1] === '2') { // Version 2
@@ -201,20 +236,19 @@ Version 1
                 parsed.key = hashArr[4];
 
                 options = hashArr.slice(5);
-                parsed.password = options.indexOf('p') !== -1;
-                parsed.present = options.indexOf('present') !== -1;
-                parsed.embed = options.indexOf('embed') !== -1;
-                parsed.ownerKey = getOwnerKey(options);
-
-                parsed.getHash = function (opts) {
-                    var hash = hashArr.slice(0, 5).join('/') + '/';
-                    var owner = typeof(opts.ownerKey) !== "undefined" ? opts.ownerKey : parsed.ownerKey;
-                    if (owner) { hash += owner + '/'; }
-                    if (parsed.password) { hash += 'p/'; }
-                    if (opts.embed) { hash += 'embed/'; }
-                    if (opts.present) { hash += 'present/'; }
-                    return hash;
-                };
+                addOptions();
+
+                return parsed;
+            }
+            if (hashArr[1] && hashArr[1] === '3') { // Version 3: hidden hash
+                parsed.version = 3;
+                parsed.app = hashArr[2];
+                parsed.mode = hashArr[3];
+                parsed.channel = hashArr[4];
+
+                options = hashArr.slice(5);
+                addOptions();
 
                 return parsed;
             }
             return parsed;
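
Concretely, a version-3 "hidden" hash carries an application type, an access mode, and a channel id, but no encryption key. Given the parsing rules above, a hypothetical hidden hash (the 32-character channel id is a made-up placeholder) decomposes like this:

```js
// Hypothetical example input; the channel id below is a placeholder.
var hidden = '/3/pad/edit/4d93e4e64e1dcd4891398f0698b0f40e/';

var parsed = Hash.parseTypeHash('pad', hidden);
// parsed.version  === 3
// parsed.app      === 'pad'
// parsed.mode     === 'edit'
// parsed.channel  === '4d93e4e64e1dcd4891398f0698b0f40e'
// parsed.password === false   (no trailing 'p/' segment)
// parsed.key is undefined: the URL reveals which channel to join,
// but not how to decrypt it.
```
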
@@ -222,34 +256,54 @@ Version 1
         parsed.getHash = function () { return hashArr.join('/'); };
         if (['media', 'file'].indexOf(type) !== -1) {
             parsed.type = 'file';
+
+            parsed.getOptions = function () {
+                return {
+                    embed: parsed.embed,
+                    present: parsed.present,
+                    ownerKey: parsed.ownerKey,
+                    password: parsed.password
+                };
+            };
+
+            parsed.getHash = function (opts) {
+                var hash = hashArr.slice(0, 4).join('/') + '/';
+                var owner = typeof(opts.ownerKey) !== "undefined" ? opts.ownerKey : parsed.ownerKey;
+                if (owner) { hash += owner + '/'; }
+                if (parsed.password || opts.password) { hash += 'p/'; }
+                if (opts.embed) { hash += 'embed/'; }
+                if (opts.present) { hash += 'present/'; }
+                return hash;
+            };
+
             if (hashArr[1] && hashArr[1] === '1') {
                 parsed.version = 1;
                 parsed.channel = hashArr[2].replace(/-/g, '/');
                 parsed.key = hashArr[3].replace(/-/g, '/');
                 options = hashArr.slice(4);
-                parsed.ownerKey = getOwnerKey(options);
+                addOptions();
                 return parsed;
             }
+
             if (hashArr[1] && hashArr[1] === '2') { // Version 2
                 parsed.version = 2;
                 parsed.app = hashArr[2];
                 parsed.key = hashArr[3];
 
                 options = hashArr.slice(4);
-                parsed.password = options.indexOf('p') !== -1;
-                parsed.present = options.indexOf('present') !== -1;
-                parsed.embed = options.indexOf('embed') !== -1;
-                parsed.ownerKey = getOwnerKey(options);
-
-                parsed.getHash = function (opts) {
-                    var hash = hashArr.slice(0, 4).join('/') + '/';
-                    var owner = typeof(opts.ownerKey) !== "undefined" ? opts.ownerKey : parsed.ownerKey;
-                    if (owner) { hash += owner + '/'; }
-                    if (parsed.password) { hash += 'p/'; }
-                    if (opts.embed) { hash += 'embed/'; }
-                    if (opts.present) { hash += 'present/'; }
-                    return hash;
-                };
+                addOptions();
+
+                return parsed;
+            }
+
+            if (hashArr[1] && hashArr[1] === '3') { // Version 3: hidden hash
+                parsed.version = 3;
+                parsed.app = hashArr[2];
+                parsed.channel = hashArr[3];
+
+                options = hashArr.slice(4);
+                addOptions();
 
                 return parsed;
             }
             return parsed;
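
`Hash.getHiddenHashFromKeys`, defined near the top of this file, is the producing side of these version-3 hashes. A sketch of the round trip, with a hand-built `secret` standing in for what `Hash.getSecrets` would normally return:

```js
// Hand-built stand-in for a real `secret`; actual callers obtain one
// from Hash.getSecrets. The channel id and key string are placeholders.
var secret = {
    channel: '4d93e4e64e1dcd4891398f0698b0f40e',
    keys: { editKeyStr: 'xxx' }, // having an edit key selects 'edit/' mode
    password: false
};

var hidden = Hash.getHiddenHashFromKeys('pad', secret, {});
// hidden === '/3/pad/edit/4d93e4e64e1dcd4891398f0698b0f40e/'
// enough information to rejoin the channel in the right mode, with no
// decryption key left behind in the address bar or browser history.
```
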
@@ -303,6 +357,10 @@ Version 1
             url += '#' + hash;
             return url;
         };
+        ret.getOptions = function () {
+            if (!ret.hashData || !ret.hashData.getOptions) { return {}; }
+            return ret.hashData.getOptions();
+        };
 
         if (!/^https*:\/\//.test(href)) {
             idx = href.indexOf('/#');
@@ -325,6 +383,14 @@ Version 1
         return ret;
     };
 
+    Hash.hashToHref = function (hash, type) {
+        return '/' + type + '/#' + hash;
+    };
+    Hash.hrefToHash = function (href) {
+        var parsed = Hash.parsePadUrl(href);
+        return parsed.hash;
+    };
+
     Hash.getRelativeHref = function (href) {
         if (!href) { return; }
         if (href.indexOf('#') === -1) { return; }
@@ -345,7 +411,7 @@ Version 1
         secret.version = 2;
         secret.type = type;
     };
-    if (!secretHash && !window.location.hash) { //!/#/.test(window.location.href)) {
+    if (!secretHash) {
         generate();
         return secret;
     } else {
@@ -355,12 +421,7 @@ Version 1
         if (!type) { throw new Error("getSecrets with a hash requires a type parameter"); }
         parsed = parseTypeHash(type, secretHash);
         hash = secretHash;
-    } else {
-        var pHref = parsePadUrl(window.location.href);
-        parsed = pHref.hashData;
-        hash = pHref.hash;
     }
-    //var hash = secretHash || window.location.hash.slice(1);
     if (hash.length === 0) {
         generate();
         return secret;
@@ -496,8 +557,8 @@ Version 1
         if (typeof(parsed.hashData.version) === "undefined") { return; }
         // pads and files should have a base64 (or hex) key
         if (parsed.hashData.type === 'pad' || parsed.hashData.type === 'file') {
-            if (!parsed.hashData.key) { return; }
-            if (!/^[a-zA-Z0-9+-/=]+$/.test(parsed.hashData.key)) { return; }
+            if (!parsed.hashData.key && !parsed.hashData.channel) { return; }
+            if (parsed.hashData.key && !/^[a-zA-Z0-9+-/=]+$/.test(parsed.hashData.key)) { return; }
         }
     }
     return true;
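
The two small helpers above, `hashToHref` and `hrefToHash`, convert between the hash and href representations and are near-inverses of each other, assuming `parsePadUrl` accepts app-relative hrefs of this shape (which the non-`https` branch above suggests):

```js
// Round-tripping between the two representations; the channel id is a
// placeholder, as before.
var href = Hash.hashToHref('/3/pad/view/4d93e4e64e1dcd4891398f0698b0f40e/', 'pad');
// href === '/pad/#/3/pad/view/4d93e4e64e1dcd4891398f0698b0f40e/'

var hash = Hash.hrefToHash(href);
// hash === '/3/pad/view/4d93e4e64e1dcd4891398f0698b0f40e/' again
```
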
diff --git a/www/common/common-interface.js b/www/common/common-interface.js
index 9cc1c1efb..b9869ba67 100644
--- a/www/common/common-interface.js
+++ b/www/common/common-interface.js
@@ -70,6 +70,7 @@ define([
                 if (typeof(yes) === 'function') { yes(e); }
                 break;
         }
+        $(el || window).off('keydown', handler);
     };
 
     $(el || window).keydown(handler);
@@ -491,6 +492,11 @@ define([
         $ok.focus();
         Notifier.notify();
     });
+
+    return {
+        element: frame,
+        delete: close
+    };
 };
 
 UI.prompt = function (msg, def, cb, opt, force) {
@@ -582,7 +588,7 @@ define([
         $ok.click();
     }, function () {
         $cancel.click();
-    }, ok);
+    }, frame);
 
     document.body.appendChild(frame);
     setTimeout(function () {
@@ -1050,39 +1056,36 @@ define([
         return radio;
     };
 
+    var corner = {
+        queue: [],
+        state: false
+    };
     UI.cornerPopup = function (text, actions, footer, opts) {
         opts = opts || {};
 
-        var minimize = h('div.cp-corner-minimize.fa.fa-window-minimize');
-        var maximize = h('div.cp-corner-maximize.fa.fa-window-maximize');
+        var dontShowAgain = h('div.cp-corner-dontshow', [
+            h('span.fa.fa-times'),
+            Messages.dontShowAgain
+        ]);
+
         var popup = h('div.cp-corner-container', [
-            minimize,
-            maximize,
-            h('div.cp-corner-filler', { style: "width:110px;" }),
-            h('div.cp-corner-filler', { style: "width:80px;" }),
-            h('div.cp-corner-filler', { style: "width:60px;" }),
-            h('div.cp-corner-filler', { style: "width:40px;" }),
-            h('div.cp-corner-filler', { style: "width:20px;" }),
             setHTML(h('div.cp-corner-text'), text),
             h('div.cp-corner-actions', actions),
-            setHTML(h('div.cp-corner-footer'), footer)
+            setHTML(h('div.cp-corner-footer'), footer),
+            opts.dontShowAgain ? dontShowAgain : undefined
         ]);
         var $popup = $(popup);
 
-        $(minimize).click(function () {
-            $popup.addClass('cp-minimized');
-        });
-        $(maximize).click(function () {
-            $popup.removeClass('cp-minimized');
-        });
-
         if (opts.hidden) {
             $popup.addClass('cp-minimized');
         }
         if (opts.big) {
             $popup.addClass('cp-corner-big');
         }
+        if (opts.alt) {
+            $popup.addClass('cp-corner-alt');
+        }
 
         var hide = function () {
             $popup.hide();
@@ -1092,9 +1095,28 @@ define([
         };
         var deletePopup = function () {
            $popup.remove();
+            if (!corner.queue.length) {
+                corner.state = false;
+                return;
+            }
+            setTimeout(function () {
+                $('body').append(corner.queue.pop());
+            }, 5000);
        };
 
-        $('body').append(popup);
+        $(dontShowAgain).click(function () {
+            deletePopup();
+            if (typeof(opts.dontShowAgain) === "function") {
+                opts.dontShowAgain();
+            }
+        });
+
+        if (corner.state) {
+            corner.queue.push(popup);
+        } else {
+            corner.state = true;
+            $('body').append(popup);
+        }
 
         return {
             popup: popup,
@@ -1104,5 +1126,36 @@ define([
         };
     };
 
+    UI.makeSpinner = function ($container) {
+        var $ok = $('<span>', {'class': 'fa fa-check', title: Messages.saved}).hide();
+        var $spinner = $('<span>', {'class': 'fa fa-spinner fa-pulse'}).hide();
+
+        var spin = function () {
+            $ok.hide();
+            $spinner.show();
+        };
+        var hide = function () {
+            $ok.hide();
+            $spinner.hide();
+        };
+        var done = function () {
+            $ok.show();
+            $spinner.hide();
+        };
+
+        if ($container && $container.append) {
+            $container.append($ok);
+            $container.append($spinner);
+        }
+
+        return {
+            ok: $ok[0],
+            spinner: $spinner[0],
+            spin: spin,
+            hide: hide,
+            done: done
+        };
+    };
+
     return UI;
 });
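
With the module-level `corner` state above, only one corner popup is shown at a time; later requests are queued and surface five seconds after the visible one is removed. A hypothetical call site (the option names are the ones handled above; the strings are invented for illustration):

```js
// Hypothetical call site for the queued corner popup.
UI.cornerPopup(
    'A new version of this document is available.', // text (set as innerHTML)
    [],   // action buttons, if any
    '',   // footer
    {
        alt: true, // use the white 'cp-corner-alt' styling
        dontShowAgain: function () {
            // render the dismiss control and persist the user's choice,
            // e.g. in their settings
        }
    }
);
// clicking the dismiss control removes the popup, runs the callback,
// and lets the next queued popup appear after the 5s delay.
```
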
diff --git a/www/common/common-messaging.js b/www/common/common-messaging.js
index feb3d79d3..15d0408f7 100644
--- a/www/common/common-messaging.js
+++ b/www/common/common-messaging.js
@@ -53,10 +53,18 @@ define([
         return list;
     };
 
+    Msg.declineFriendRequest = function (store, data, cb) {
+        store.mailbox.sendTo('DECLINE_FRIEND_REQUEST', {}, {
+            channel: data.notifications,
+            curvePublic: data.curvePublic
+        }, function (obj) {
+            cb(obj);
+        });
+    };
     Msg.acceptFriendRequest = function (store, data, cb) {
         var friend = getFriend(store.proxy, data.curvePublic) || {};
         var myData = createData(store.proxy, friend.channel || data.channel);
-        store.mailbox.sendTo('ACCEPT_FRIEND_REQUEST', myData, {
+        store.mailbox.sendTo('ACCEPT_FRIEND_REQUEST', { user: myData }, {
             channel: data.notifications,
             curvePublic: data.curvePublic
         }, function (obj) {
@@ -110,7 +118,7 @@ define([
         var proxy = store.proxy;
         var friend = proxy.friends[curvePublic];
         if (!friend) { return void cb({error: 'ENOENT'}); }
-        if (!friend.notifications || !friend.channel) { return void cb({error: 'EINVAL'}); }
+        if (!friend.notifications) { return void cb({error: 'EINVAL'}); }
 
         store.mailbox.sendTo('UNFRIEND', {
             curvePublic: proxy.curvePublic
diff --git a/www/common/common-ui-elements.js b/www/common/common-ui-elements.js
index 459891900..ec5e34cb1 100644
--- a/www/common/common-ui-elements.js
+++ b/www/common/common-ui-elements.js
@@ -56,6 +56,21 @@ define([
         });
     };
 
+    var dcAlert;
+    UIElements.disconnectAlert = function () {
+        if (dcAlert && $(dcAlert.element).length) { return; }
+        dcAlert = UI.alert(Messages.common_connectionLost, undefined, true);
+    };
+    UIElements.reconnectAlert = function () {
+        if (!dcAlert) { return; }
+        if (!dcAlert.delete) {
+            dcAlert = undefined;
+            return;
+        }
+        dcAlert.delete();
+        dcAlert = undefined;
+    };
+
     var importContent = function (type, f, cfg) {
         return function () {
             var $files = $('<input>', {type:"file"});
@@ -212,15 +227,7 @@ define([
             common.mailbox.sendTo("RM_OWNER", {
                 channel: channel,
                 title: data.title,
-                pending: pending,
-                user: {
-                    displayName: user.name,
-                    avatar: user.avatar,
-                    profile: user.profile,
-                    notifications: user.notifications,
-                    curvePublic: user.curvePublic,
-                    edPublic: priv.edPublic
-                }
+                pending: pending
             }, {
                 channel: friend.notifications,
                 curvePublic: friend.curvePublic
@@ -363,15 +370,7 @@ define([
                 channel: channel,
                 href: data.href,
                 password: data.password,
-                title: data.title,
-                user: {
-                    displayName: user.name,
-                    avatar: user.avatar,
-                    profile: user.profile,
-                    notifications: user.notifications,
-                    curvePublic: user.curvePublic,
-                    edPublic: priv.edPublic
-                }
+                title: data.title
            }, {
                channel: friend.notifications,
                curvePublic: friend.curvePublic
@@ -548,21 +547,27 @@ define([
 
         if (!data.noPassword) {
             var hasPassword = data.password;
+            var $pwLabel = $('