diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 000000000..e28e69132 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,45 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: '' +assignees: '' + +--- + +**Describe the bug** +A clear and concise description of what the bug is. + +**Where did it happen?** +Did the issue occur on CryptPad.fr or an instance hosted by a third party? +If on another instance, please provide its full URL. + +**To Reproduce** +Steps to reproduce the behavior: +1. Go to '...' +2. Click on '....' +3. Scroll down to '....' +4. See error + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Screenshots** +If applicable, add screenshots to help explain your problem. + +**Browser (please complete the following information):** + - OS: [e.g. iOS] + - Browser [e.g. firefox, tor browser, chrome, safari, brave, edge, ???] + - variations [e.g. Firefox nightly, Firefox ESR, Chromium, Ungoogled chrome] + - Version [e.g. 22] + - Extensions installed [e.g. UBlock Origin, Passbolt, LibreJS] + - Browser tweaks [e.g. firefox "Enhanced Tracking Protection" strict/custom mode, tor browser "safer" security level, chrome incognito mode] + +**Smartphone (please complete the following information):** + - Device: [e.g. iPhone6] + - OS: [e.g. iOS8.1] + - Browser [e.g. stock browser, safari] + - Version [e.g. 22] + +**Additional context** +Add any other context about the problem here. diff --git a/.gitignore b/.gitignore index d96f6e6ac..50796e9bb 100644 --- a/.gitignore +++ b/.gitignore @@ -20,4 +20,4 @@ block/ logs/ privileged.conf config/config.js - +*yolo.sh diff --git a/CHANGELOG.md b/CHANGELOG.md index fcfdee9b1..b8ceec5f1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,203 @@ +# OrienteCaveRat release (3.14.0) + +## Goals + +We planned a one-week release cycle in order to finish up some major features that were already in development during our last release. + +In the meantime, the reaction to the COVID-19 pandemic has resulted in a greatly increased load on our servers, so we've begun to focus on improving stability to ensure that we are able to keep up with demand. + +## Update notes + +We had some trouble during the week of March 9th, 2020, as the CryptPad.fr server started throwing EMFILE errors. This means that it was trying to open new files (for reading or writing) but there were too many files open already. We've added some new code to help debug the issue, but there is not yet a fix in place. The maximum number of open files on our host OS had been increased by several orders of magnitude (several years ago), but we're now aware that the systemd service file that launches the API server does not respect this global limit. As such, we've updated the example service file to indicate how you can raise this limit yourself; see the excerpt after the update steps below. For an example of how to update this limit at the OS level, see this page: https://docs.oracle.com/cd/E19623-01/820-6168/file-descriptor-requirements.html + +Otherwise, updating from 3.13.0 to 3.14.0 is as usual: + +1. stop your server +2. fetch the latest source +3. `npm i` +4. `bower update` +5. restart your server
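+
+For reference, this is the kind of override the updated example service file (`docs/cryptpad.service`) now contains. It is a minimal sketch; the exact number is illustrative, so match it to your host's `ulimit -n`:
+
+```
+[Service]
+# systemd applies its own open-file limit unless you override it here;
+# raise it so the API server does not hit EMFILE errors under load
+LimitNOFILE=1000000
+```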
+ +## Features + +We're very happy to announce a major update to our kanban application! We've made a lot of changes, but the most notable ones are: + +* the ability to add markdown content to your cards and edit it collaboratively in real-time +* tags on cards and the ability to filter cards by tags at the top of the application +* indicators to show if a card is being modified by another user while you are editing it +* the ability to toggle between an 'overview mode' which hides everything but your cards' titles and a full mode which shows everything +* vertical scrolling for very tall columns, and horizontal scrolling for columns that don't fit on your screen (instead of reflowing to the next line) +* a smaller palette of pre-chosen colors for cards and boards instead of a color-picker, to make it easier to choose matching colors for tasks +* the ability to drag cards and boards to the trash instead of having to click a small X and confirm their deletion + +## Bug fixes + +* Drive: + * a regression in the drive for anonymous users made it impossible to delete contained pads directly from the drive (though deletion from the pad itself was working). It's now back to normal. + * we've updated the translation key referenced in [issue 482](https://github.com/xwiki-labs/cryptpad/issues/482) to clarify what qualifies a pad as "recently modified". +* We noticed (and fixed) another regression that disabled our recently introduced "history trim" functionality. +* We've identified and addressed a few client networking errors that were causing clients to disconnect (and to get stuck in a reconnecting state), but we're still actively looking for more. +* Server: + * we've added some extra checks to try to identify where our file descriptor leak is coming from; we'll release fixes as they become available. + * we've caught a TypeError that only ever happened while the server was overwhelmed with EMFILE errors. + * [this PR](https://github.com/xwiki-labs/cryptpad/pull/503) fixed an incorrect conditional expression at launch-time. + +# NorthernWhiteRhino release (3.13.0) + +## Goals + +This release cycle we prioritized the completion of "access lists", a major feature that we're excited to introduce. + +## Update notes + +Nearly every week (sometimes more than once) we end up taking time away from development to help administrators configure their CryptPad instances. We're happy to see more instances popping up, but ideally we'd like to spend more of our time working on new features. With this in mind we devoted some time to simplifying instance configuration and clarifying some points where people commonly have difficulty. + +If you review `cryptpad/config/config.example.js` you'll notice it is significantly smaller than it was last release. +Old configuration files should be backwards compatible (if you copied `config.example.js` to `config.js` in order to customize it). +The example has been reorganized so that the most important parts (which people seemed to miss most of the time) are at the top. +Most of the fields which were defined within the config file now have defaults defined within the server itself. +If you supply these values they will override the default, but for the most part they can be removed. + +We advise that you read the comments at the top of the example, in particular the points related to `httpUnsafeOrigin` and `httpSafeOrigin`, which are used to protect users' cryptographic keys in the event of a cross-site scripting (XSS) vulnerability.
+If these values are not correctly set then your users will not benefit from all the security measures we've spent lots of time implementing. + +A lot of the fields that were present as modifiable defaults have been removed or commented out in the example config. +If you supply them then they will override the default behaviour; however, you probably won't need to, and doing so might break important functionality. +Content-Security-Policy (CSP) definitions should be safe to remove, as should `httpAddress`, `httpPort`, and `httpSafePort` (unless you need to run the nodejs API server on an address other than `localhost` or a port other than 3000). + +Up until now it's been possible for administrators to allow users to pay for accounts (on their server) via https://accounts.cryptpad.fr. +Our intent was to securely handle payment and then split the proceeds between ourselves and the instance's administrator. +In practice this just created extra work for us because we ended up having to contact admins, all of whom have opted to treat the subscription as a donation to support development. +As such we have disabled the ability of users to pay for premium subscriptions (on https://accounts.cryptpad.fr) for any instance other than our own. + +Servers with premium subscriptions enabled were configured to check whether anyone had subscribed to a premium account by querying our accounts server on a daily basis. +We've left this daily check in place despite premium subscriptions being disabled because it informs us how many third-party instances exist and what versions they are running. +We don't sell or share this information with anyone, but it is useful to us because it informs us what older data structures we have to continue to support. +For instance, we retain code for migrating documents to newer data formats as long as we know that there are still instances that have not run those migrations. +We also cite the number of third-party instances when applying for grants as an indicator of the value of funding our project. +In any case, you can disable this daily check-in by setting `blockDailyCheck` to `true` in `config/config.js`. + +Finally, we've implemented the ability to set a higher limit on the maximum size of uploaded files for premium users (paying users on CryptPad.fr and users with entries in `customLimits` on other instances). +Set this limit as a number (of bytes) with `premiumUploadSize` in your config file, as in the sketch below.
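+
+Put together, a minimal production `config/config.js` using these options might look like the following sketch (the domains are placeholders; every field shown appears in the updated `config.example.js`):
+
+```js
+// a minimal sketch, not a complete reference
+module.exports = {
+    // the URL your users will enter to load your instance
+    httpUnsafeOrigin: 'https://cryptpad.example.com/',
+    // a separate domain used to sandbox the user interface
+    httpSafeOrigin: 'https://sandbox.example.com',
+    // opt out of the daily check-in described above
+    blockDailyCheck: true,
+    // optionally raise the upload cap for premium users (in bytes)
+    premiumUploadSize: 100 * 1024 * 1024,
+};
+```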
+ +## Features + +* It is often difficult to fix problems reported as GitHub issues because we don't have enough information. The platform's repository now includes an _issue template_ with a list of details that will probably be relevant to fixing bugs. Please read the list carefully, as we'll probably just close issues if the information we need is not included. +* We've made it easy to terminate all open sessions for your account. If you're logged in, you'll now see a _log out everywhere_ button in the _user admin menu_ (in the top-right corner of the screen). + * You may still terminate only _remote sessions_ while leaving your local session intact via the pre-existing button on the settings page's _confidentiality_ tab. +* You may have noticed that it takes progressively longer to load your account as you add more files to your drive, shared folders, and teams. This is because an integrity check is run on all your files when you first launch a CryptPad session. We optimized some parts of this check to speed it up. We plan to continue searching for similar processes that we can optimize in order to decrease loading time and improve run-time efficiency. +* Lastly, this release introduces **access lists**, which you can use to limit who can view your documents _even if they have the keys required to decrypt them_. You can do so by using the _Access_ modal for any given document, available in the `...` dropdown menu in each app's toolbar or when right-clicking in the drive. + * Enabling access restriction for a document will disallow anyone except its owners or allowed users from opening it. Anyone else who is currently editing or viewing the document will be disconnected from the session. + +## Bug fixes + +* A member of _C3Wien_ reported some strange behaviour triggered by customizing some of Firefox's anti-tracking features. The settings incorrectly identified our cross-domain sandboxing system as a tracker and interfered with its normal functionality. As a result, the user was treated as though they were not logged in, even though pads from their account's drive were displayed within the "anonymous drive" that unregistered users normally see. + * This was simple to fix, requiring only that we adjust our method of checking whether a user is logged in. + * If you ever notice odd behaviour we do recommend that you review any customizations you've made to your browser, as we only test CryptPad under default conditions unless prompted to investigate an issue. +* Users that take advantage of the Mermaid renderer in our markdown editor's preview pane may have noticed that the preview's scroll position was lost whenever mermaid charts were modified. We've updated our renderer such that it preserves scroll position when redrawing elements, making it easier to see the effects of your changes when editing large charts. + +# Megaloceros release (3.12.0) + +## Goals + +As of our last release our 'history trim' functionality was almost ready to go. We took this release period to do some extensive testing and to prepare the 'allow list' functionality which will be included in our next release. + +In the meantime, we also aimed to improve performance, add a few small but nice features, and fix a number of bugs. + +## Update notes + +This release includes updates to: + +1. the server and its dependencies +2. the example nginx configuration which we recommend for production installations +3. the client code and its dependencies + +Our ability to debug CryptPad's usage of shared workers (on the client) has been complicated by the fact that Firefox's shared worker debugging panel was not working for our instance. We finally traced the problem back to a Content-Security-Policy setting in our configuration file. The issue can be addressed by adding a `resource:` entry in the `connect-src` header. We've updated the example nginx config to reflect this; the relevant directives are excerpted after the update steps below. You can deploy this version of CryptPad without this modification, but without it our ability to debug and fix issues related to shared workers will be extremely limited. + +Otherwise, updating from CryptPad v3.11.0 is pretty much the same as normal: + +1. stop your server +2. pull the latest code via git +3. `npm i` to get the latest server dependencies +4. `bower update` to get the latest client dependencies +5. restart your server
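+
+For reference, the corresponding directives in the updated `docs/example.nginx.conf` are:
+
+```
+# connect-src restricts URLs which can be loaded using script interfaces
+set $connectSrc "'self' https://${main_domain} ${main_domain} https://${api_domain} blob: wss://${api_domain} ${api_domain} ${files_domain}";
+
+# script-src specifies valid sources for javascript, including inline handlers
+set $scriptSrc "'self' resource: ${main_domain}";
+```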
+ +## Features + +* The CryptPad server stores documents as a series of encrypted changes to a blank document. We have mechanisms in place that make it so clients only need the most recent changes to view the document, but the storage requirements on the server would only ever grow unless you deleted the entire document. As of this release, owners of a document have the option to remove that unnecessary history. To do so: right-click a pad in a drive or shared folder and choose the properties option in the menu. The bottom of the properties popup will display the document's size. If there is any history that is eligible for removal, a button will be displayed to remove it. + * This option is only available for the pad's owners. If it has no owners then it will not be possible to remove its history. + * It is not yet possible to trim the history of spreadsheets, as they are based on a different system than the rest of our documents and it will take some additional work to add this functionality. +* We've also added the ability to easily make copies of documents from your drive. Right-click on documents and select "make a copy" from the menu. + * This feature doesn't work for files. Files can't be modified anyway, so there's little value in making copies. + * We haven't added the ability to make a copy of a spreadsheet yet for the same reasons as above. +* We've improved the way our markdown renderer handles links to better support a variety of types of URLs: + * anchors, like `[bug fixes](#bug-fixes)` + * relative paths, like `[cryptpad home page](/index.html)` or `[a rich text pad](/pad/#/pad/view/12151241241254123412451231231221)` + * absolute URLs without the protocol, like `[the cryptpad repo](//github.com/xwiki-labs/cryptpad)` +* We've optimized a background process that iterates over a part of the database when you first launch the CryptPad server. It now uses less memory and should incur less load on the CPU when restarting the server. This should allow the server to spend its resources handling clients that are trying to reconnect. +* We've also optimized some client-side code to prioritize loading your drive instead of some other non-essential resources used for notifications. Pages should load faster. We're working on some related improvements to address page load time which we'll introduce on an ongoing basis. +* As noted above, we're finally able to debug shared workers in Firefox. We're investigating a few issues that were blocked by this limitation, and we hope to include a number of bug fixes in upcoming releases. +* We've continued some ongoing improvements to the instance admin panel and introduced the ability to link directly to a support ticket. The link will only be useful to users who would already be able to open the admin panel. +* The code responsible for fetching and scanning the older history of a document has also been optimized to avoid handling messages for channels multiple times. +* Finally, we've received contributions from our German and Italian translators via our weblate instance. + * We're always looking for more help with localization. You can review the status of our translations and contribute to them [here](https://weblate.cryptpad.fr/projects/cryptpad/app/). + +## Bug fixes + +* After a lot of digging we believe we've identified and fixed a case of automatic text duplication in our rich text editor. We plan to wait a little longer and see if [reports of the incorrect behaviour](https://github.com/xwiki-labs/cryptpad/issues/352) really do stop, but we're optimistic that this problem has been solved.
+* [Another GitHub issue](https://github.com/xwiki-labs/cryptpad/issues/497) related to upgrading access for team members has been fixed. If you continue to have issues with permissions for team members, we recommend having the team owner demote the affected users to viewers before promoting them to the desired access level. +* We've fixed a number of small issues in our server: + * The server did not correctly respond to unsupported commands for its SET_METADATA RPC. Instead of responding with an error it ignored the message. In practice this should not have affected any users, since our client only uses supported commands. + * The server used to log an error for every entry in a document's metadata log that contained an unsupported command. As we develop, such logs are occasionally read by older versions of the code that don't support every command. To avoid filling the logs with errors, we now ignore any errors of a given type beyond the first one encountered for a given document. +* We've fixed an issue with read-only spreadsheets that was introduced in our previous release. An overlay intended to prevent users from interacting with the spreadsheet while disconnected was incorrectly applied to spreadsheets in read-only mode, preventing users from copying their data. +* Clients send "pin commands" to the server to instruct it to count a document against their quota and to preserve its data even if it's considered inactive. We realized that the client wasn't including todo-lists in its list of pads to pin and have updated the client to do so. + +# LabradorDuck release (3.11.0) + +## Goals + +For this release we aimed to phase in two major features that we've been anticipating for a while: "history trim" and "safe links". + +History trim will allow users to remove the old versions of their documents which continue to count against their storage quotas. It will be formally introduced in our next release, even though its server-side components are all ready. We had to reorganize and modify a lot of our server code, so we wanted to wait and make sure there were no regressions in our existing functionality before moving ahead. + +We're introducing the concept of "safe links" in CryptPad. Users can continue to share links to documents which include the cryptographic secrets necessary to read or edit them, but whenever possible we will replace those secrets with a document id. This will make it less likely for encryption keys to be exposed to third parties through invasive browser extensions or passive behaviour like history synchronization across devices. + +## Update notes + +This release features a few changes to the server: + +1. The "legal notice" feature which we included in the previous release turned out to be incorrect. We've since fixed it. We document this functionality [here](https://github.com/xwiki-labs/cryptpad/blob/e8b905282a2cde826ad9100dcad6b59a50c70e8b/www/common/application_config_internal.js#L35-L41), but you'll need to implement the recommended changes in `cryptpad/customize/application_config.js` for best effect. +2. We've dropped server-side support for the `retainData` attribute in `cryptpad/config/config.js`. Previously you could configure CryptPad to delete unpinned, inactive data immediately or to move it into an archive for a configurable retention period. We've removed the option to delete data outright, since it introduces additional complexity in the server which we don't regularly test.
We also figure that administrators will appreciate this default in the event of a bug which incorrectly flags data as inactive (see the configuration sketch after the deployment steps below). +3. We've fixed an incorrect line in [the example nginx configuration file](https://github.com/xwiki-labs/cryptpad/commit/1be01c07eee3431218d0b40a58164f60fec6df31). If you're using nginx as a reverse proxy for your CryptPad instance you should correct this line. It is used to set Content-Security-Policy headers for the sandboxed-iframe which provides an additional layer of security for users in the event of a cross-site-scripting (XSS) vulnerability within CryptPad. If you find that your instance stops working after applying this change it is likely that you have not correctly configured your instance to use a secondary domain for its sandbox. See [this section of `cryptpad/config/config.example.js`](https://github.com/xwiki-labs/cryptpad/blob/c388641479128303363d8a4247f64230c08a7264/config/config.example.js#L94-L96) for more information. + +Otherwise, deploying the new code should be fairly simple: + +1. stop your server +2. fetch the latest code from the git repository +3. update your server dependencies with `npm install` +4. update your clientside dependencies with `bower update` +5. start your server
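+
+For reference, a sketch of the related options as they now appear (commented out) in `config/config.example.js`; the values shown are the documented defaults:
+
+```js
+// data retention sketch (defaults shown; both fields are optional)
+module.exports = {
+    // unpinned pads may expire after this many days of inactivity
+    inactiveTime: 90,
+    // archived data is kept this many days before permanent removal;
+    // scripts/evict-inactive.js is intended to run daily via cron
+    archiveRetentionTime: 15,
+};
+```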
+ +## Features + +* We've slightly reorganized the _settings_ page to include a new "Confidentiality" section. It includes a checkbox to enable "safe links", which will remove the cryptographic secrets from your documents' URLs whenever possible. It is currently off by default but will most likely default to true in the near future. Otherwise, the settings page has an updated layout which is generally easier to read. +* We've removed the "Owned pads" category from the CryptDrive application. It was included to provide an overview of pads that you could delete when we first introduced that functionality; however, we've realized that it is generally not very useful. +* We implemented the ability to convert a regular folder in your drive into a _shared folder_ several months ago, but disabled it when we discovered that it had some bugs. We finally got around to fixing those bugs and so it is officially ready for public use. +* We've continued to make little changes to improve the discoverability of CryptPad's social features. Unregistered users that view another user's profile are now informed that they can send that profile's owner a contact request once they register. +* You may remember that CryptPad's contacts used to be called "friends". We've changed this terminology to reflect that you might work with people with whom you do not have a close personal relationship. +* We analyzed CryptPad for possible vectors for social abuse as a part of our _Teams_ project, sponsored by NLnet foundation. During this audit we identified that the main method for abuse was through the direct messaging/notifications system. We added the ability to mute users, but realized it could be difficult to find the profile page of the person you want to mute. As of this release, any notification triggered by a remote user's actions will include their avatar and a link to their profile. If you find any user's behaviour abusive or annoying you can go straight to their profile and mute them. +* We've made some small improvements to the admin panel's support ticket view. Tickets which have not received a response are now highlighted in red. +* The login/register pages had a minor bug where the loading screen was not correctly displayed the second time you tried to enter your password. This was because the key derivation function which unlocks the corresponding user credentials was keeping the CPU busy and preventing an animation from running. It has since been corrected. +* We've continued to make some small but important changes to various UI elements that are reused throughout the platform. The password field in the _pad properties dialog_ has been tweaked for better color contrast. Similarly, the small notice that pops up in the bottom right hand corner to prompt you to store a pad in your drive has been restyled. We've also implemented a second variation on this popup to display general information not directly related to the current pad. Both of these UI elements better match the general appearance of the rest of the platform and represent a continued effort to improve its visual consistency. +* The spreadsheet editor has received some attention in the last few weeks as well. It is now able to gracefully resume a session when you reconnect to the server after an interruption. Likewise, the locking system which prevents two users from editing a cell at the same time is now significantly faster, and completely disabled if you're editing alone. Now that it's possible for unregistered users to edit spreadsheets we've had to improve the color contrast for the toolbar message which prompts users to register in order to ensure that a spreadsheet isn't deleted due to inactivity. +* The "file upload status table" has received some attention as well, in response to [issue 496](https://github.com/xwiki-labs/cryptpad/issues/496). When you upload many files to CryptPad in a row you'll see them all displayed in a table which will include a scrollbar if necessary. + +## Bug fixes + +* [Issue 441](https://github.com/xwiki-labs/cryptpad/issues/441 "Other users writing in pad hiijacks chat window") has been fixed. +* We found a bug that affected encrypted files saved to your CryptDrive via the right-click menu. The files were saved in an incorrect format and were unusable. They should behave normally now. +* Finally, we identified a race condition whereby if two users sent each other contact requests at the same time the request might not be accepted correctly. This process should now be much more reliable. + # Kouprey release (3.10.0) ## Goals diff --git a/bower.json b/bower.json index b67900338..88d92e18d 100644 --- a/bower.json +++ b/bower.json @@ -30,7 +30,7 @@ "secure-fabric.js": "secure-v1.7.9", "hyperjson": "~1.4.0", "chainpad-crypto": "^0.2.0", - "chainpad-listmap": "^0.7.0", + "chainpad-listmap": "^0.8.1", "chainpad": "^5.1.0", "file-saver": "1.3.1", "alertifyjs": "1.0.11", diff --git a/config/config.example.js b/config/config.example.js index 9981c0626..273c196d2 100644 --- a/config/config.example.js +++ b/config/config.example.js @@ -1,68 +1,110 @@ -/*@flow*/ -/* - globals module -*/ -var _domain = 'http://localhost:3000/'; - -// You can `kill -USR2` the node process and it will write out a heap dump.
-// If your system doesn't support dumping, comment this out and install with -// `npm install --production` -// See: https://strongloop.github.io/strongloop.com/strongblog/how-to-heap-snapshots/ +/* globals module */ -// to enable this feature, uncomment the line below: -// require('heapdump'); +/* DISCLAIMER: -// we prepend a space because every usage expects it -// requiring admins to preserve it is unnecessarily confusing + There are two recommended methods of running a CryptPad instance: -var domain = ' ' + _domain; + 1. Using a standalone nodejs server without HTTPS (suitable for local development) + 2. Using NGINX to serve static assets and to handle HTTPS for the API server's websocket traffic -// Content-Security-Policy -var baseCSP = [ - "default-src 'none'", - "style-src 'unsafe-inline' 'self' " + domain, - "font-src 'self' data:" + domain, + We do not officially recommend or support Apache, Docker, Kubernetes, Traefik, or any other configuration. + Support requests for such setups should be directed to their authors. - /* child-src is used to restrict iframes to a set of allowed domains. - * connect-src is used to restrict what domains can connect to the websocket. - * - * it is recommended that you configure these fields to match the - * domain which will serve your CryptPad instance. - */ - "child-src blob: *", - // IE/Edge - "frame-src blob: *", + If you're having difficulty configuring your instance + we suggest that you join the project's IRC/Matrix channel. - /* this allows connections over secure or insecure websockets - if you are deploying to production, you'll probably want to remove - the ws://* directive, and change '*' to your domain - */ - "connect-src 'self' ws: wss: blob:" + domain, + If you don't have any difficulty configuring your instance and you'd like to + support us for the work that went into making it pain-free we are quite happy + to accept donations via our opencollective page: https://opencollective.com/cryptpad - // data: is used by codemirror - "img-src 'self' data: blob:" + domain, - "media-src * blob:", + - // for accounts.cryptpad.fr authentication and cross-domain iframe sandbox - "frame-ancestors *", - "" -]; +*/ +module.exports = { +/* CryptPad is designed to serve its content over two domains. + * Account passwords and cryptographic content are handled on the 'main' domain, + * while the user interface is loaded on a 'sandbox' domain + * which can only access information which the main domain willingly shares. + * + * In the event of an XSS vulnerability in the UI (that's bad) + * this system prevents attackers from gaining access to your account (that's good). + * + * Most problems with new instances are related to this system blocking access + * because of incorrectly configured sandboxes. If you only see a white screen + * when you try to load CryptPad, this is probably the cause. + * + * PLEASE READ THE FOLLOWING COMMENTS CAREFULLY. + * + */ + +/* httpUnsafeOrigin is the URL that clients will enter to load your instance. + * Any other URL that somehow points to your instance is supposed to be blocked. + * The default provided below assumes you are loading CryptPad from a server + * which is running on the same machine, using port 3000. + * + * In a production instance this should be available ONLY over HTTPS + * using the default port for HTTPS (443), i.e. 
https://cryptpad.fr + * In such a case this should be handled by NGINX, as documented in + * cryptpad/docs/example.nginx.conf (see the $main_domain variable) + * + */ + httpUnsafeOrigin: 'http://localhost:3000/', + +/* httpSafeOrigin is the URL that is used for the 'sandbox' described above. + * If you're testing or developing with CryptPad on your local machine then + * it is appropriate to leave this blank. The default behaviour is to serve + * the main domain over port 3000 and to serve the content over port 3001. + * + * This is not appropriate in a production environment where invasive networks + * may filter traffic going over abnormal ports. + * To correctly configure your production instance you must provide a URL + * with a different domain (a subdomain is sufficient). + * It will be used to load the UI in our 'sandbox' system. + * + * This value corresponds to the $sandbox_domain variable + * in the example nginx file. + * + * CUSTOMIZE AND UNCOMMENT THIS FOR PRODUCTION INSTALLATIONS. + */ + // httpSafeOrigin: "https://some-other-domain.xyz", +/* httpAddress specifies the address on which the nodejs server + * should be accessible. By default it will listen on 127.0.0.1 + * (IPv4 localhost on most systems). If you want it to listen on + * all addresses, including IPv6, set this to '::'. + * + */ + //httpAddress: '::', + +/* httpPort specifies on which port the nodejs server should listen. + * By default it will serve content over port 3000, which is suitable + * for both local development and for use with the provided nginx example, + * which will proxy websocket traffic to your node server. + * + */ + //httpPort: 3000, + +/* httpSafePort allows you to specify an alternative port from which + * the node process should serve sandboxed assets. The default value is + * that of your httpPort + 1. You probably don't need to change this. + * + */ + //httpSafePort: 3001, -module.exports = { /* ===================== * Admin * ===================== */ /* - * CryptPad now contains an administration panel. Its access is restricted to specific + * CryptPad contains an administration panel. Its access is restricted to specific * users using the following list. * To give access to the admin panel to a user account, just add their user id, * which can be found on the settings page for registered users. * Entries should be strings separated by a comma. */ +/* adminKeys: [ //"https://my.awesome.website/user/#/1/cryptpad-user1/YZgXQxKR0Rcb6r6CmxHPdAGLVludrAF2lEnkbx1vVOo=", ], +*/ /* CryptPad's administration panel includes a "support" tab * wherein administrators with a secret key can view messages @@ -77,118 +119,77 @@ module.exports = { */ // supportMailboxPublicKey: "", - /* ===================== - * Infra setup - * ===================== */ - - // the address you want to bind to, :: means all ipv4 and ipv6 addresses - // this may not work on all operating systems - httpAddress: '::', - - // the port on which your httpd will listen - httpPort: 3000, - - // This is for allowing the cross-domain iframe to function when developing - httpSafePort: 3001, - - // This is for deployment in production, CryptPad uses a separate origin (domain) to host the - // cross-domain iframe. It can simply host the same content as CryptPad. - // httpSafeOrigin: "https://some-other-domain.xyz", - - httpUnsafeOrigin: domain, - - /* Your CryptPad server will share this value with clients - * via its /api/config endpoint. + /* We're very proud that CryptPad is available to the public as free software! 
+ * We do, however, still need to pay our bills as we develop the platform. * - * If you want to host your API and asset servers on different hosts - * specify a URL for your API server websocket endpoint, like so: - * wss://api.yourdomain.com/cryptpad_websocket + * By default CryptPad will prompt users to consider donating to + * our OpenCollective campaign. We publish the state of our finances periodically + * so you can decide for yourself whether our expenses are reasonable. * - * Otherwise, leave this commented and your clients will use the default - * websocket (wss://yourdomain.com/cryptpad_websocket) + * You can disable any solicitations for donations by setting 'removeDonateButton' to true, + * but we'd appreciate it if you didn't! */ - //externalWebsocketURL: 'wss://api.yourdomain.com/cryptpad_websocket + //removeDonateButton: false, - /* CryptPad can be configured to send customized HTTP Headers - * These settings may vary widely depending on your needs - * Examples are provided below + /* CryptPad will display a point of contact for your instance on its contact page + * (/contact.html) if you provide it below. */ - httpHeaders: { - "X-XSS-Protection": "1; mode=block", - "X-Content-Type-Options": "nosniff", - "Access-Control-Allow-Origin": "*" - }, - - contentSecurity: baseCSP.join('; ') + - "script-src 'self'" + domain, + adminEmail: 'i.did.not.read.my.config@cryptpad.fr', - // CKEditor and OnlyOffice require significantly more lax content security policy in order to function. - padContentSecurity: baseCSP.join('; ') + - "script-src 'self' 'unsafe-eval' 'unsafe-inline'" + domain, + /* + * By default, CryptPad contacts one of our servers once a day. + * This check-in will also send some very basic information about your instance including its + * version and the adminEmail so we can reach you if we are aware of a serious problem. + * We will never sell it or send you marketing mail. + * + * If you want to block this check-in and remain invisible, set 'blockDailyCheck' to true. + */ + //blockDailyCheck: false, - /* Main pages - * add exceptions to the router so that we can access /privacy.html - * and other odd pages + /* + * By default users get 50MB of storage by registering on an instance. + * You can set this value to whatever you want. + * + * hint: 50MB is 50 * 1024 * 1024 */ - mainPages: [ - 'index', - 'privacy', - 'terms', - 'about', - 'contact', - 'what-is-cryptpad', - 'features', - 'faq', - 'maintenance' - ], + //defaultStorageLimit: 50 * 1024 * 1024, + /* ===================== - * Subscriptions + * STORAGE * ===================== */ - /* Limits, Donations, Subscriptions and Contact - * - * By default, CryptPad limits every registered user to 50MB of storage. It also shows a - * subscribe button which allows them to upgrade to a paid account. We handle payment, - * and keep 50% of the proceeds to fund ongoing development. - * - * You can: - * A: leave things as they are - * B: disable accounts but display a donate button - * C: hide any reference to paid accounts or donation + /* Pads that are not 'pinned' by any registered user can be set to expire + * after a configurable number of days of inactivity (default 90 days). + * The value can be changed or set to false to remove expiration. + * Expired pads can then be removed using a cron job calling the + * `evict-inactive.js` script with node * - * If you chose A then there's nothing to do. - * If you chose B, set 'allowSubscriptions' to false. 
- * If you chose C, set 'removeDonateButton' to true + * defaults to 90 days if nothing is provided */ - allowSubscriptions: true, - removeDonateButton: false, - - /* - * By default, CryptPad also contacts our accounts server once a day to check for changes in - * the people who have accounts. This check-in will also send the version of your CryptPad - * instance and your email so we can reach you if we are aware of a serious problem. We will - * never sell it or send you marketing mail. If you want to block this check-in and remain - * completely invisible, set this and allowSubscriptions both to false. - */ - adminEmail: 'i.did.not.read.my.config@cryptpad.fr', + //inactiveTime: 90, // days - /* Sales coming from your server will be identified by your domain + /* CryptPad archives some data instead of deleting it outright. + * This archived data still takes up space and so you'll probably still want to + * remove these files after a brief period. * - * If you are using CryptPad in a business context, please consider taking a support contract - * by contacting sales@cryptpad.fr + * cryptpad/scripts/evict-inactive.js is intended to be run daily + * from a crontab or similar scheduling service. + * + * The intent with this feature is to provide a safety net in case of accidental + * deletion. Set this value to the number of days you'd like to retain + * archived data before it's removed permanently. + * + * defaults to 15 days if nothing is provided */ - myDomain: _domain, + //archiveRetentionTime: 15, - /* - * If you are using CryptPad internally and you want to increase the per-user storage limit, - * change the following value. - * - * Please note: This limit is what makes people subscribe and what pays for CryptPad - * development. Running a public instance that provides a "better deal" than cryptpad.fr - * is effectively using the project against itself. + /* Max Upload Size (bytes) + * this sets the maximum size of any one file uploaded to the server. + * anything larger than this size will be rejected + * defaults to 20MB if no value is provided */ - defaultStorageLimit: 50 * 1024 * 1024, + //maxUploadSize: 20 * 1024 * 1024, /* * CryptPad allows administrators to give custom limits to their friends. @@ -198,8 +199,8 @@ module.exports = { * * hint: 1GB is 1024 * 1024 * 1024 bytes */ +/* customLimits: { - /* "https://my.awesome.website/user/#/1/cryptpad-user1/YZgXQxKR0Rcb6r6CmxHPdAGLVludrAF2lEnkbx1vVOo=": { limit: 20 * 1024 * 1024 * 1024, plan: 'insider', @@ -210,69 +211,15 @@ module.exports = { plan: 'insider', note: 'storage space donated by my.awesome.website' } - */ }, +*/ - /* ===================== - * STORAGE - * ===================== */ - - /* By default the CryptPad server will run scheduled tasks every five minutes - * If you want to run scheduled tasks in a separate process (like a crontab) - * you can disable this behaviour by setting the following value to true - */ - disableIntegratedTasks: false, - - /* Pads that are not 'pinned' by any registered user can be set to expire - * after a configurable number of days of inactivity (default 90 days). - * The value can be changed or set to false to remove expiration. - * Expired pads can then be removed using a cron job calling the - * `delete-inactive.js` script with node - */ - inactiveTime: 90, // days - - /* CryptPad can be configured to remove inactive data which has not been pinned. - * Deletion of data is always risky and as an operator you have the choice to - * archive data instead of deleting it outright. 
Set this value to true if - * you want your server to archive files and false if you want to keep using - * the old behaviour of simply removing files. + /* Users with premium accounts (those with a plan included in their customLimits) + * can benefit from an increased upload size limit. By default they are restricted to the same + * upload size as any other registered user. * - * WARNING: this is not implemented universally, so at the moment this will - * only apply to the removal of 'channels' due to inactivity. - */ - retainData: true, - - /* As described above, CryptPad offers the ability to archive some data - * instead of deleting it outright. This archived data still takes up space - * and so you'll probably still want to remove these files after a brief period. - * The intent with this feature is to provide a safety net in case of accidental - * deletion. Set this value to the number of days you'd like to retain - * archived data before it's removed permanently. - * - * If 'retainData' is set to false, there will never be any archived data - * to remove. - */ - archiveRetentionTime: 15, - - /* Max Upload Size (bytes) - * this sets the maximum size of any one file uploaded to the server. - * anything larger than this size will be rejected - */ - maxUploadSize: 20 * 1024 * 1024, - - /* ===================== - * HARDWARE RELATED - * ===================== */ - - /* CryptPad's file storage adaptor closes unused files after a configurable - * number of milliseconds (default 30000 (30 seconds)) - */ - channelExpirationMs: 30000, - - /* CryptPad's file storage adaptor is limited by the number of open files. - * When the adaptor reaches openFileLimit, it will clean up older files */ - openFileLimit: 2048, + //premiumUploadSize: 100 * 1024 * 1024, /* ===================== * DATABASE VOLUMES diff --git a/customize.dist/loading.js b/customize.dist/loading.js index e20e79438..32841f253 100644 --- a/customize.dist/loading.js +++ b/customize.dist/loading.js @@ -3,7 +3,6 @@ define([], function () { var loadingStyle = (function(){/* #cp-loading { - transition: opacity 0.75s, visibility 0s 0.75s; visibility: visible; position: fixed; z-index: 10000000; @@ -24,6 +23,7 @@ define([], function () { #cp-loading.cp-loading-hidden { opacity: 0; visibility: hidden; + transition: opacity 0.75s, visibility 0s 0.75s; } #cp-loading .cp-loading-logo { height: 300px; diff --git a/customize.dist/login.js b/customize.dist/login.js index 0bf36b7ce..3954aa1e2 100644 --- a/customize.dist/login.js +++ b/customize.dist/login.js @@ -180,7 +180,7 @@ define([ return; } - console.error(decryptedBlock); + //console.error(decryptedBlock); res.blockInfo = decryptedBlock; })); }).nThen(function (waitFor) { @@ -275,7 +275,7 @@ define([ if (res.blockInfo) { opt = loginOptionsFromBlock(res.blockInfo); userHash = res.blockInfo.User_hash; - console.error(opt, userHash); + //console.error(opt, userHash); } else { console.log("allocating random bytes for a new user object"); opt = allocateBytes(Nacl.randomBytes(Exports.requiredBytes)); @@ -293,7 +293,7 @@ define([ return void cb('MODERN_REGISTRATION_INIT'); } - console.error(JSON.stringify(rt.proxy)); + //console.error(JSON.stringify(rt.proxy)); // export the realtime object you checked RT = rt; @@ -458,6 +458,7 @@ define([ UI.removeLoadingScreen(function () { UI.alert(Messages.login_noSuchUser, function () { hashing = false; + $('#password').focus(); }); }); break; @@ -465,6 +466,7 @@ define([ UI.removeLoadingScreen(function () { UI.alert(Messages.login_invalUser, function () { 
hashing = false; + $('#password').focus(); }); }); break; @@ -472,6 +474,7 @@ UI.removeLoadingScreen(function () { UI.alert(Messages.login_invalPass, function () { hashing = false; + $('#password').focus(); }); }); break; @@ -482,6 +485,7 @@ ]); UI.alert(warning, function () { hashing = false; + $('#password').focus(); }); }); break; diff --git a/customize.dist/pages.js b/customize.dist/pages.js index fc3ee6175..d782e4a16 100644 --- a/customize.dist/pages.js +++ b/customize.dist/pages.js @@ -107,7 +107,7 @@ define([ ])*/ ]) ]), - h('div.cp-version-footer', "CryptPad v3.10.0 (Kouprey)") + h('div.cp-version-footer', "CryptPad v3.14.0 (OrienteCaveRat)") ]); }; diff --git a/customize.dist/src/less2/include/alertify.less b/customize.dist/src/less2/include/alertify.less index 557f391c1..f9c63215c 100644 --- a/customize.dist/src/less2/include/alertify.less +++ b/customize.dist/src/less2/include/alertify.less @@ -72,6 +72,10 @@ z-index: 100000; // alertify container font: @colortheme_app-font; + .cp-checkmark { + color: @cryptpad_text_col; + } + .cp-inline-alert-text { flex: 1; } @@ -182,6 +186,7 @@ margin-bottom: 10px; box-sizing: content-box; span { + .tools_unselectable(); font-size: 20px; height: 40px; line-height: 40px; @@ -190,12 +195,16 @@ border-left: 1px solid lighten(@alertify-base, 10%); border-right: 1px solid lighten(@alertify-base, 10%); cursor: pointer; - &:hover { + &:not(.disabled):hover { background-color: @alertify-light-bg; } + &.disabled { + color: #949494; + cursor: not-allowed; + } } span.alertify-tabs-active { - background-color: @alertify-fore; + background-color: @alertify-fore !important; border-left: 1px solid @alertify-fore; border-right: 1px solid @alertify-fore; color: @alertify-base; @@ -268,7 +277,7 @@ margin-bottom: 15px; } - button { + button:not(.pure-button) { display: inline-block; position: relative; margin: 6px 8px; @@ -279,11 +288,17 @@ nav { padding: @alertify_padding-base; text-align: right; - button { + button, div.cp-button-confirm { margin: 0px !important; - &:not(:first-child) { + &:not(:first-child):not(.left) { margin-left: @alertify_padding-base !important; } + &.left { + float: left; + } + } + div.cp-button-confirm { + vertical-align: middle; } } } @@ -386,18 +401,13 @@ } } div.wide { - div.alertify-tabs { - p.msg:not(:last-child) { - border-bottom: 1px solid @alertify-fore; - } - } .cp-share-columns { display: flex; flex-flow: row; & > .cp-share-column { width: 50%; - padding: 0 10px; + //padding: 0 10px; position: relative; &.contains-nav { nav { @@ -414,7 +424,20 @@ } } &:first-child { - border-right: 1px solid @alertify-fore; + margin-right: @alertify_padding-base; + } + &:last-child { + margin-left: @alertify_padding-base; + } + } + & > .cp-share-column-mid { + display: flex; + align-items: center; + button { + width: 50px; + margin: 0; + min-width: 0; + font-size: 18px !important; } } } diff --git a/customize.dist/src/less2/include/buttons.less b/customize.dist/src/less2/include/buttons.less index ad6aaf9cc..c3ef85b1c 100644 --- a/customize.dist/src/less2/include/buttons.less +++ b/customize.dist/src/less2/include/buttons.less @@ -10,7 +10,7 @@ @alertify-input-fg: @colortheme_modal-input-fg; input:not(.form-control), textarea { - background-color: @alertify-input-fg; + // background-color: @alertify-input-fg; color: @cryptpad_text_col; border: 1px solid @alertify-input-bg; width: 100%; @@ -23,6 +23,10 @@ } } + input:not(.form-control) { + height: @variables_input-height; + } + div.cp-alertify-type { display: flex; input { 
@@ -48,6 +52,25 @@ } } + div.cp-button-confirm { + display: inline-block; + button { + margin: 0; + } + .cp-button-timer { + height: 3px; + & > div { + height: 100%; + background-color: @colortheme_alertify-primary; + &.danger, &.btn-danger, &.danger-alt, &.btn-danger-alt { + background-color: @colortheme_alertify-red; + } + } + } + } + button.cp-button-confirm-placeholder { + margin-bottom: 3px !important; + } button:not(.pure-button):not(.md-button):not(.mdl-button) { @@ -85,6 +108,7 @@ white-space: normal; font-weight: bold; } + &.danger, &.btn-danger { background-color: @colortheme_alertify-red; border-color: @colortheme_alertify-red-border; @@ -94,6 +118,15 @@ } } + &.danger-alt, &.btn-danger-alt { + border-color: @colortheme_alertify-red; + color: @colortheme_alertify-red; + &:hover, &:active { + color: @colortheme_alertify-red-color; + background-color: contrast(@colortheme_modal-bg, darken(@colortheme_alertify-red, 10%), lighten(@colortheme_alertify-red, 10%)); + } + } + &.safe, &.btn-safe { background-color: @colortheme_alertify-green; border-color: @colortheme_alertify-green-border; diff --git a/customize.dist/src/less2/include/colortheme.less b/customize.dist/src/less2/include/colortheme.less index 05a46c25d..709afa7c8 100644 --- a/customize.dist/src/less2/include/colortheme.less +++ b/customize.dist/src/less2/include/colortheme.less @@ -135,7 +135,7 @@ @colortheme_oocell-bg: #40865c; @colortheme_oocell-color: #FFF; -@colortheme_oocell-warn: #cd2532; +@colortheme_oocell-warn: #ffbcc0; @colortheme_kanban-bg: #8C4; @colortheme_kanban-color: #000; diff --git a/customize.dist/src/less2/include/corner.less b/customize.dist/src/less2/include/corner.less index 0740586aa..feec62165 100644 --- a/customize.dist/src/less2/include/corner.less +++ b/customize.dist/src/less2/include/corner.less @@ -4,9 +4,9 @@ --LessLoader_require: LessLoader_currentFile(); }; & { - @corner-button-ok: #2c9b00; - @corner-button-cancel: #990000; @corner-link: #ffff7a; + @corner-blue: @colortheme_logo-1; + @corner-white: @colortheme_base; @keyframes appear { 0% { @@ -27,21 +27,23 @@ .cp-corner-container { position: absolute; - right: 0; - bottom: 0; - width: 300px; - height: 200px; - border-top-left-radius: 200px; - padding: 15px; - text-align: right; - background-color: @colortheme_logo-1; - color: @colortheme_base; + right: 10px; + bottom: 10px; + width: 350px; + padding: 10px; + background-color: fade(@corner-blue, 95%); + border: 1px solid @corner-blue; + color: @corner-white; z-index: 9999; transform-origin: bottom right; animation: appear 0.8s ease-in-out; - box-shadow: 0 0 10px 0 @colortheme_logo-1; - //transform: scale(0.1); - //transform: scale(1); + //box-shadow: 0 0 10px 0 @corner-blue; + + &.cp-corner-alt { + background-color: fade(@corner-white, 95%); + border: 1px solid @corner-blue; + color: @corner-blue; + } h1, h2, h3 { font-size: 1.5em; @@ -64,7 +66,7 @@ line-height: 15px; display: none; &:hover { - color: darken(@colortheme_base, 15%); + color: darken(@corner-white, 15%); } } .cp-corner-minimize { @@ -86,46 +88,95 @@ } } &.cp-corner-big { - width: 400px; - height: 250px; + width: 500px; + } + + .cp-corner-dontshow { + cursor: pointer; + .fa { + margin-right: 0.3em; + font-size: 1.1em; + } + &:hover { + color: darken(@corner-white, 10%); + } + } + &.cp-corner-alt { + .cp-corner-dontshow { + &:hover { + color: lighten(@corner-blue, 10%); + } + } } .cp-corner-actions { min-height: 30px; - margin: 15px auto; - display: inline-block; + margin: 10px auto; + display: block; + text-align: right; } 
.cp-corner-footer { - font-style: italic; font-size: 0.8em; } .cp-corner-footer, .cp-corner-text { a { - color: @corner-link; + color: @corner-white; + text-decoration: underline; &:hover { - color: darken(@corner-link, 20%); + color: darken(@corner-white, 10%); } } } + &.cp-corner-alt a { + color: @corner-blue; + &:hover { + color: lighten(@corner-blue, 10%); + } + } button { - border: 0px; padding: 5px; - color: @colortheme_base; - margin-left: 5px; + color: @corner-white; + &:not(:first-child) { + margin-left: 10px; + } outline: none; + text-transform: uppercase; + border: 1px solid @corner-white; + .fa, .cptools { + margin-right: 0.3em; + } &.cp-corner-primary { - background-color: @corner-button-ok; - font-weight: bold; + background-color: @corner-white; + color: @corner-blue; &:hover { - background-color: lighten(@corner-button-ok, 10%); + background-color: lighten(@corner-blue, 50%); + border-color: lighten(@corner-blue, 50%); } } &.cp-corner-cancel { - background-color: @corner-button-cancel; - margin-left: 10px; + background-color: @corner-blue; + color: @corner-white; + &:hover { + background-color: darken(@corner-blue, 10%); + } + } + } + &.cp-corner-alt button { + border-color: @corner-blue; + &.cp-corner-primary { + background-color: @corner-blue; + color: @corner-white; + &:hover { + background-color: darken(@corner-blue, 10%); + border-color: darken(@corner-blue, 10%); + } + } + &.cp-corner-cancel { + background-color: @corner-white; + color: @corner-blue; &:hover { - background-color: lighten(@corner-button-cancel, 10%); + background-color: lighten(@corner-blue, 50%); } } } diff --git a/customize.dist/src/less2/include/fileupload.less b/customize.dist/src/less2/include/fileupload.less index ebe93399b..8fb1c8857 100644 --- a/customize.dist/src/less2/include/fileupload.less +++ b/customize.dist/src/less2/include/fileupload.less @@ -14,9 +14,11 @@ right: 10vw; bottom: 10vh; box-sizing: border-box; - z-index: 1000000; //Z file upload table container + z-index: 100000; //Z file upload table container display: none; color: darken(@colortheme_drive-bg, 10%); + max-height: 180px; + overflow-y: auto; @media screen and (max-width: @browser_media-medium-screen) { left: 5vw; right: 5vw; bottom: 5vw; @@ -26,6 +28,9 @@ display: flex; background-color: darken(@colortheme_modal-bg, 10%); font-weight: bold; + position: sticky; + top: 0; + z-index: 1; .cp-fileupload-header-title { padding: 0.25em 0.5em; flex-grow: 1; diff --git a/customize.dist/src/less2/include/markdown.less b/customize.dist/src/less2/include/markdown.less index a79717550..631fe8f12 100644 --- a/customize.dist/src/less2/include/markdown.less +++ b/customize.dist/src/less2/include/markdown.less @@ -53,6 +53,45 @@ } } +.markdown_cryptpad() { + word-wrap: break-word; + + h1, h2, h3, h4, h5, h6 { + font-weight: bold; + padding-bottom: 0.3em; + border-bottom: 1px solid #eee; + } + li { + min-height: 22px; + } + .todo-list-item { + list-style: none; + position: relative; + .fa { + position: absolute; + margin-left: -17px; + margin-top: 4px; + } + } + media-tag { + * { + max-width: 100%; + } + iframe[src$=".pdf"] { + width: 100%; + height: 80vh; + max-height: 90vh; + } + } + media-tag:empty { + width: 100px; + height: 100px; + display: inline-block; + border: 1px solid #BBB; + } + +} + .markdown_preformatted-code (@color: #333) { pre > code { display: block; diff --git a/customize.dist/src/less2/include/modals-ui-elements.less b/customize.dist/src/less2/include/modals-ui-elements.less index 3e6855fbe..855142a28 100644 --- 
a/customize.dist/src/less2/include/modals-ui-elements.less +++ b/customize.dist/src/less2/include/modals-ui-elements.less @@ -26,12 +26,65 @@ // Properties modal .cp-app-prop { margin-bottom: 10px; + .cp-app-prop-size-container { + height: 20px; + background-color: @colortheme_logo-2; + margin: 10px 0; + padding: 0; + div { + height: 20px; + margin: 0; + padding: 0; + background-color: #CCCCCC; + } + } + .cp-app-prop-size-legend { + color: @colortheme_modal-fg; + display: flex; + margin: 10px 0; + & > div { + display: flex; + align-items: center; + flex-basis: 50%; + margin: 0; + padding: 0; + } + .cp-app-prop-history-size-color, .cp-app-prop-contents-size-color { + display: inline-block; + height: 20px; + width: 20px; + margin-right: 10px; + } + .cp-app-prop-history-size-color { + background-color: #CCCCCC; + } + .cp-app-prop-contents-size-color { + background-color: @colortheme_logo-2; + } + } } .cp-app-prop-content { color: @cryptpad_text_col; } + // Access modal + .cp-overlay-container { + position: relative; + .cp-overlay { + position: absolute; + background-color: rgba(255,255,255,0.5); + top: 0; + bottom: 0; + left: 0; + right: 0; + } + } + + .cp-access-margin-right { + margin-right: 5px !important; + } + // teams invite modal .cp-teams-invite-block { display: flex; diff --git a/customize.dist/src/less2/include/notifications.less b/customize.dist/src/less2/include/notifications.less index 1e4430db2..a24ad32d3 100644 --- a/customize.dist/src/less2/include/notifications.less +++ b/customize.dist/src/less2/include/notifications.less @@ -8,6 +8,7 @@ @notif-height: 50px; .cp-notifications-container { max-width: 300px; + width: 300px; display: flex; flex-flow: column; & hr { @@ -16,6 +17,14 @@ .cp-notification { min-height: @notif-height; display: flex; + .cp-avatar { + .avatar_main(30px); + padding: 0 5px; + cursor: pointer; + &:hover { + background-color: rgba(0,0,0,0.1); + } + } .cp-notification-content { flex: 1; align-items: stretch; diff --git a/customize.dist/src/less2/include/sidebar-layout.less b/customize.dist/src/less2/include/sidebar-layout.less index 1f9c92457..7961e1bee 100644 --- a/customize.dist/src/less2/include/sidebar-layout.less +++ b/customize.dist/src/less2/include/sidebar-layout.less @@ -1,6 +1,7 @@ @import (reference) "/customize/src/less2/include/colortheme-all.less"; @import (reference) "/customize/src/less2/include/leftside-menu.less"; @import (reference) "/customize/src/less2/include/buttons.less"; +@import (reference) "/customize/src/less2/include/browser.less"; @sidebar_button-width: 400px; @@ -73,6 +74,7 @@ padding: 5px 20px; color: @rightside-color; overflow: auto; + padding-bottom: 200px; // Following rules are only in settings .cp-sidebarlayout-element { @@ -96,7 +98,7 @@ } } margin-bottom: 20px; - .buttons_main(); + .buttons_main(); } [type="text"], [type="password"], button { vertical-align: middle; @@ -107,6 +109,7 @@ .cp-sidebarlayout-input-block { display: inline-flex; width: @sidebar_button-width; + max-width: 100%; input { flex: 1; //border-radius: 0.25em 0 0 0.25em; @@ -117,6 +120,8 @@ //border-radius: 0 0.25em 0.25em 0; //border: 1px solid #adadad; border-left: 0px; + height: @variables_input-height; + margin: 0 !important; } } &>div { @@ -161,6 +166,25 @@ } */ } + @media screen and (max-width: @browser_media-medium-screen) { + flex-flow: column; + overflow: auto; + #cp-sidebarlayout-leftside { + width: 100% !important; // Override "narrow" mode + padding-bottom: 20px; + .cp-sidebarlayout-categories { + .cp-sidebarlayout-category { + margin: 0; + 
span.cp-sidebar-layout-category-name { + display: inline !important; // override "narrow" mode + } + } + } + } + #cp-sidebarlayout-rightside { + overflow: unset; + } + } } } diff --git a/customize.dist/src/less2/include/tokenfield.less b/customize.dist/src/less2/include/tokenfield.less index faa302b0a..358f0d18b 100644 --- a/customize.dist/src/less2/include/tokenfield.less +++ b/customize.dist/src/less2/include/tokenfield.less @@ -1,3 +1,4 @@ +@import (reference) "./colortheme-all.less"; @import (reference) "./tools.less"; .tokenfield_main () { @@ -11,7 +12,6 @@ .tools_unselectable(); display: flex; flex-wrap: wrap; - justify-content: space-around; height: auto; min-height: 34px; padding-bottom: 0px; @@ -22,21 +22,19 @@ width: ~"calc(100% - 20px)"; .token { box-sizing: border-box; - border-radius: 3px; display: inline-flex; align-items: center; - border: 1px solid #d9d9d9; - background-color: #ededed; + background-color: rgba(0, 0, 0, 0.1); white-space: nowrap; margin: 2px 0; + margin-right: 5px; height: 24px; vertical-align: middle; cursor: default; - - color: #222; + color: @cryptpad_text_col; &:hover { - border-color: #b9b9b9; + background-color: rgba(0, 0, 0, 0.2); } &.invalid { background: none; @@ -57,6 +55,7 @@ vertical-align: middle; } .close { + opacity: 1; font-family: Arial; display: inline-block; line-height: 1.49em; diff --git a/customize.dist/src/less2/include/toolbar.less b/customize.dist/src/less2/include/toolbar.less index 1d1ca0c47..3c59218f4 100644 --- a/customize.dist/src/less2/include/toolbar.less +++ b/customize.dist/src/less2/include/toolbar.less @@ -168,6 +168,7 @@ position: relative; order: -2; resize: horizontal; + z-index: 1; #cp-app-contacts-container { height: 100%; } @@ -196,6 +197,7 @@ padding: 10px; box-sizing: border-box; order: -3; + z-index: 1; .cp-toolbar-userlist-drawer-close { position: absolute; margin-top: -10px; @@ -1157,6 +1159,11 @@ margin-left: 11px; } } + &.fa-unlock-alt { + .cp-toolbar-drawer-element { + margin-left: 15px; + } + } &.fa-question { .cp-toolbar-drawer-element { margin-left: 16px; @@ -1168,6 +1175,7 @@ } order: 8; &.fa-history { order: 1; } + &.fa-clone { order: 1; } &.fa-download { order: 2; } &.fa-upload { order: 3; } &.fa-print { order: 4; } diff --git a/customize.dist/src/less2/include/usergrid.less b/customize.dist/src/less2/include/usergrid.less index 6ba8c2d07..9fbcb1da0 100644 --- a/customize.dist/src/less2/include/usergrid.less +++ b/customize.dist/src/less2/include/usergrid.less @@ -55,7 +55,7 @@ justify-content: center; align-items: center; padding: 5px; - margin: 3px; + margin: 3px !important; cursor: default; transition: order 0.5s, background-color 0.5s; margin-top: 1px; @@ -109,6 +109,27 @@ color: @colortheme_alertify-primary-text; } } + .fa-times { + padding-left: 5px; + cursor: pointer; + height: 100%; + line-height: 25px; + color: @cryptpad_text_col; + &:hover { + color: lighten(@cryptpad_text_col, 10%); + } + } + } + &.list { + .cp-usergrid-user { + width: auto; + max-width: calc(100% - 6px); + overflow: hidden; + white-space: nowrap; + text-overflow: ellipsis; + display: inline-flex; + flex: unset; + } } } } diff --git a/customize.dist/src/less2/include/variables.less b/customize.dist/src/less2/include/variables.less index ba6c642e2..570779f05 100644 --- a/customize.dist/src/less2/include/variables.less +++ b/customize.dist/src/less2/include/variables.less @@ -3,6 +3,7 @@ // Elements size @variables_bar-height: 32px; +@variables_input-height: 38px; // Used in modal.less and alertify.less @variables_padding: 
12px; diff --git a/docs/cryptpad.service b/docs/cryptpad.service index 54adf12e9..eee8b2af5 100644 --- a/docs/cryptpad.service +++ b/docs/cryptpad.service @@ -19,5 +19,10 @@ Group=cryptpad # modify to match your working directory Environment='PWD="/home/cryptpad/cryptpad/cryptpad"' +# systemd sets the open file limit to 4000 unless you override it +# cryptpad stores its data with the filesystem, so you should increase this to match the value of `ulimit -n` +# or risk EMFILE errors. +LimitNOFILE=1000000 + [Install] WantedBy=multi-user.target diff --git a/docs/example.nginx.conf b/docs/example.nginx.conf index bef853d26..ea8224c14 100644 --- a/docs/example.nginx.conf +++ b/docs/example.nginx.conf @@ -72,7 +72,7 @@ server { set $styleSrc "'unsafe-inline' 'self' ${main_domain}"; # connect-src restricts URLs which can be loaded using script interfaces - set $connectSrc "'self' https://${main_domain} $main_domain https://${api_domain} blob:"; + set $connectSrc "'self' https://${main_domain} ${main_domain} https://${api_domain} blob: wss://${api_domain} ${api_domain} ${files_domain}"; # fonts can be loaded from data-URLs or the main domain set $fontSrc "'self' data: ${main_domain}"; @@ -96,7 +96,7 @@ server { set $workerSrc "https://${main_domain}"; # script-src specifies valid sources for javascript, including inline handlers - set $scriptSrc "'self' ${main_domain}"; + set $scriptSrc "'self' resource: ${main_domain}"; set $unsafe 0; # the following assets are loaded via the sandbox domain @@ -106,11 +106,11 @@ server { if ($uri ~ ^\/common\/onlyoffice\/.*\/index\.html.*$) { set $unsafe 1; } # everything except the sandbox domain is a privileged scope, as they might be used to handle keys - if ($host != sandbox.cryptpad.info) { set $unsafe 0; } + if ($host != $sandbox_domain) { set $unsafe 0; } # privileged contexts allow a few more rights than unprivileged contexts, though limits are still applied if ($unsafe) { - set $scriptSrc "'self' 'unsafe-eval' 'unsafe-inline' ${main_domain}"; + set $scriptSrc "'self' 'unsafe-eval' 'unsafe-inline' resource: ${main_domain}"; } # Finally, set all the rules you composed above. diff --git a/historyKeeper.js b/historyKeeper.js deleted file mode 100644 index fe16a204c..000000000 --- a/historyKeeper.js +++ /dev/null @@ -1,1021 +0,0 @@ -/* jshint esversion: 6 */ -/* global Buffer */ -;(function () { 'use strict'; - -const nThen = require('nthen'); -const Nacl = require('tweetnacl/nacl-fast'); -const Crypto = require('crypto'); -const Once = require("./lib/once"); -const Meta = require("./lib/metadata"); -const WriteQueue = require("./lib/write-queue"); -const BatchRead = require("./lib/batch-read"); - -let Log; -const now = function () { return (new Date()).getTime(); }; -const ONE_DAY = 1000 * 60 * 60 * 24; // one day in milliseconds - -/* getHash - * this function slices off the leading portion of a message which is - most likely unique - * these "hashes" are used to identify particular messages in a channel's history - * clients store "hashes" either in memory or in their drive to query for new messages: - * when reconnecting to a pad - * when connecting to chat or a mailbox - * thus, we can't change this function without invalidating client data which: - * is encrypted clientside - * can't be easily migrated - * don't break it! 
-*/ -const getHash = function (msg) { - if (typeof(msg) !== 'string') { - Log.warn('HK_GET_HASH', 'getHash() called on ' + typeof(msg) + ': ' + msg); - return ''; - } - return msg.slice(0,64); -}; - -const tryParse = function (str) { - try { - return JSON.parse(str); - } catch (err) { - Log.error('HK_PARSE_ERROR', err); - } -}; - -/* sliceCpIndex - returns a list of all checkpoints which might be relevant for a client connecting to a session - - * if there are two or fewer checkpoints, return everything you have - * if there are more than two - * return at least two - * plus any more which were received within the last 100 messages - - This is important because the additional history is what prevents - clients from forking on checkpoints and dropping forked history. - -*/ -const sliceCpIndex = function (cpIndex, line) { - // Remove "old" checkpoints (cp sent before 100 messages ago) - const minLine = Math.max(0, (line - 100)); - let start = cpIndex.slice(0, -2); - const end = cpIndex.slice(-2); - start = start.filter(function (obj) { - return obj.line > minLine; - }); - return start.concat(end); -}; - -const isMetadataMessage = function (parsed) { - return Boolean(parsed && parsed.channel); -}; - -// validateKeyStrings supplied by clients must decode to 32-byte Uint8Arrays -const isValidValidateKeyString = function (key) { - try { - return typeof(key) === 'string' && - Nacl.util.decodeBase64(key).length === Nacl.sign.publicKeyLength; - } catch (e) { - return false; - } -}; - -module.exports.create = function (cfg) { - const rpc = cfg.rpc; - const tasks = cfg.tasks; - const store = cfg.store; - const retainData = cfg.retainData; - Log = cfg.log; - - Log.silly('HK_LOADING', 'LOADING HISTORY_KEEPER MODULE'); - - const metadata_cache = {}; - const HISTORY_KEEPER_ID = Crypto.randomBytes(8).toString('hex'); - - Log.verbose('HK_ID', 'History keeper ID: ' + HISTORY_KEEPER_ID); - - let sendMsg = function () {}; - let STANDARD_CHANNEL_LENGTH, EPHEMERAL_CHANNEL_LENGTH; - const setConfig = function (config) { - STANDARD_CHANNEL_LENGTH = config.STANDARD_CHANNEL_LENGTH; - EPHEMERAL_CHANNEL_LENGTH = config.EPHEMERAL_CHANNEL_LENGTH; - sendMsg = config.sendMsg; - }; - - /* computeIndex - can call back with an error or a computed index which includes: - * cpIndex: - * array including any checkpoints pushed within the last 100 messages - * processed by 'sliceCpIndex(cpIndex, line)' - * offsetByHash: - * a map containing message offsets by their hash - * this is for every message in history, so it could be very large... - * except we remove offsets from the map if they occur before the oldest relevant checkpoint - * size: in bytes - * metadata: - * validationKey - * expiration time - * owners - * ??? 
(anything else we might add in the future) - * line - * the number of messages in history - * including the initial metadata line, if it exists - - */ - const computeIndex = function (channelName, cb) { - const cpIndex = []; - let messageBuf = []; - let metadata; - let i = 0; - - const ref = {}; - - const CB = Once(cb); - - const offsetByHash = {}; - let size = 0; - nThen(function (w) { - // iterate over all messages in the channel log - // old channels can contain metadata as the first message of the log - // remember metadata the first time you encounter it - // otherwise index important messages in the log - store.readMessagesBin(channelName, 0, (msgObj, readMore) => { - let msg; - // keep an eye out for the metadata line if you haven't already seen it - // but only check for metadata on the first line - if (!i && !metadata && msgObj.buff.indexOf('{') === 0) { - i++; // always increment the message counter - msg = tryParse(msgObj.buff.toString('utf8')); - if (typeof msg === "undefined") { return readMore(); } - - // validate that the current line really is metadata before storing it as such - if (isMetadataMessage(msg)) { - metadata = msg; - return readMore(); - } - } - i++; - if (msgObj.buff.indexOf('cp|') > -1) { - msg = msg || tryParse(msgObj.buff.toString('utf8')); - if (typeof msg === "undefined") { return readMore(); } - // cache the offsets of checkpoints if they can be parsed - if (msg[2] === 'MSG' && msg[4].indexOf('cp|') === 0) { - cpIndex.push({ - offset: msgObj.offset, - line: i - }); - // we only want to store messages since the latest checkpoint - // so clear the buffer every time you see a new one - messageBuf = []; - } - } - // if it's not metadata or a checkpoint then it should be a regular message - // store it in the buffer - messageBuf.push(msgObj); - return readMore(); - }, w((err) => { - if (err && err.code !== 'ENOENT') { - w.abort(); - return void CB(err); - } - - // once indexing is complete you should have a buffer of messages since the latest checkpoint - // map the 'hash' of each message to its byte offset in the log, to be used for reconnecting clients - messageBuf.forEach((msgObj) => { - const msg = tryParse(msgObj.buff.toString('utf8')); - if (typeof msg === "undefined") { return; } - if (msg[0] === 0 && msg[2] === 'MSG' && typeof(msg[4]) === 'string') { - // msgObj.offset is API guaranteed by our storage module - // it should always be a valid positive integer - offsetByHash[getHash(msg[4])] = msgObj.offset; - } - // There is a trailing \n at the end of the file - size = msgObj.offset + msgObj.buff.length + 1; - }); - })); - }).nThen(function (w) { - // create a function which will iterate over amendments to the metadata - const handler = Meta.createLineHandler(ref, Log.error); - - // initialize the accumulator in case there was a foundational metadata line in the log content - if (metadata) { handler(void 0, metadata); } - - // iterate over the dedicated metadata log (if it exists) - // proceed even in the event of a stream error on the metadata log - store.readDedicatedMetadata(channelName, handler, w(function (err) { - if (err) { - return void Log.error("DEDICATED_METADATA_ERROR", err); - } - })); - }).nThen(function () { - // when all is done, cache the metadata in memory - if (ref.index) { // but don't bother if no metadata was found... 
- metadata = metadata_cache[channelName] = ref.meta; - } - // and return the computed index - CB(null, { - // Only keep the checkpoints included in the last 100 messages - cpIndex: sliceCpIndex(cpIndex, i), - offsetByHash: offsetByHash, - size: size, - metadata: metadata, - line: i - }); - }); - }; - - /* getIndex - calls back with an error if anything goes wrong - or with a cached index for a channel if it exists - (along with metadata) - otherwise it calls back with the index computed by 'computeIndex' - - as an added bonus: - if the channel exists but its index does not then it caches the index - */ - const batchIndexReads = BatchRead("HK_GET_INDEX"); - const getIndex = (ctx, channelName, cb) => { - const chan = ctx.channels[channelName]; - // if there is a channel in memory and it has an index cached, return it - if (chan && chan.index) { - // enforce async behaviour - return void setTimeout(function () { - cb(undefined, chan.index); - }); - } - - batchIndexReads(channelName, cb, function (done) { - computeIndex(channelName, (err, ret) => { - // this is most likely an unrecoverable filesystem error - if (err) { return void done(err); } - // cache the computed result if possible - if (chan) { chan.index = ret; } - // return - done(void 0, ret); - }); - }); - }; - - /*:: - type cp_index_item = { - offset: number, - line: number - } - */ - - /* storeMessage - * ctx - * channel id - * the message to store - * whether the message is a checkpoint - * optionally the hash of the message - * it's not always used, but we guard against it - - - * async but doesn't have a callback - * source of a race condition whereby: - * two messages can be inserted - * two offsets can be computed using the total size of all the messages - * but the offsets don't correspond to the actual location of the newlines - * because the two actions were performed like ABba... - * the fix is to use callbacks and implement queueing for writes - * to guarantee that offset computation is always atomic with writes - */
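// The queueing fix described above, sketched as a standalone illustration.
// This is an assumption about the shape of ./lib/write-queue, not a copy of it:
// a per-channel FIFO which runs one write task at a time, so that the size and
// offset bookkeeping below is always computed against fully written data.
var WriteQueueSketch = function () {
    var queues = {};
    return function (id, task) {
        var q = queues[id] = queues[id] || [];
        var next = function () {
            q.shift(); // drop the task that just finished
            if (q.length) { return void q[0](next); } // run the next pending task
            delete queues[id]; // nothing left: clean up the queue
        };
        q.push(task);
        if (q.length === 1) { task(next); } // queue was idle: run immediately
    };
};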
- const queueStorage = WriteQueue(); - - const storeMessage = function (ctx, channel, msg, isCp, optionalMessageHash) { - const id = channel.id; - - queueStorage(id, function (next) { - const msgBin = Buffer.from(msg + '\n', 'utf8'); - // Store the message first, and update the index only once it's stored. - // store.messageBin can be async so updating the index first may - // result in a wrong cpIndex - nThen((waitFor) => { - store.messageBin(id, msgBin, waitFor(function (err) { - if (err) { - waitFor.abort(); - Log.error("HK_STORE_MESSAGE_ERROR", err.message); - - // this error is critical, but there's not much we can do at the moment - // proceed with more messages, but they'll probably fail too - // at least you won't have a memory leak - - // TODO make it possible to respond to clients with errors so they know - // their message wasn't stored - return void next(); - } - })); - }).nThen((waitFor) => { - getIndex(ctx, id, waitFor((err, index) => { - if (err) { - Log.warn("HK_STORE_MESSAGE_INDEX", err.stack); - // non-critical, we'll be able to get the channel index later - return void next(); - } - if (typeof (index.line) === "number") { index.line++; } - if (isCp) { - index.cpIndex = sliceCpIndex(index.cpIndex, index.line || 0); - for (let k in index.offsetByHash) { - if (index.offsetByHash[k] < index.cpIndex[0]) { - delete index.offsetByHash[k]; - } - } - index.cpIndex.push(({ - offset: index.size, - line: ((index.line || 0) + 1) - } /*:cp_index_item*/)); - } - if (optionalMessageHash) { index.offsetByHash[optionalMessageHash] = index.size; } - index.size += msgBin.length; - - // handle the next element in the queue - next(); - })); - }); - }); - }; - - /* historyKeeperBroadcast - * uses API from the netflux server to send messages to every member of a channel - * sendMsg runs in a try-catch and drops users if sending a message fails - */ - const historyKeeperBroadcast = function (ctx, channel, msg) { - let chan = ctx.channels[channel] || (([] /*:any*/) /*:Chan_t*/); - chan.forEach(function (user) { - sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify(msg)]); - }); - }; - - /* expireChannel is here to clean up channels that should have been removed - but for some reason are still present - */ - const expireChannel = function (ctx, channel) { - if (retainData) { - return void store.archiveChannel(channel, function (err) { - Log.info("ARCHIVAL_CHANNEL_BY_HISTORY_KEEPER_EXPIRATION", { - channelId: channel, - status: err? String(err): "SUCCESS", - }); - }); - } - - store.removeChannel(channel, function (err) { - Log.info("DELETION_CHANNEL_BY_HISTORY_KEEPER_EXPIRATION", { - channelid: channel, - status: err? String(err): "SUCCESS", - }); - }); - }; - - /* checkExpired - * synchronously returns true or false to indicate whether the channel is expired - * according to its metadata - * has some side effects: - * closes the channel via the store.closeChannel API - * and then broadcasts to all channel members that the channel has expired - * removes the channel from the netflux-server's in-memory cache - * removes the channel metadata from history keeper's in-memory cache - - FIXME the boolean nature of this API should be separated from its side effects - */ - const checkExpired = function (ctx, channel) { - if (!(channel && channel.length === STANDARD_CHANNEL_LENGTH)) { return false; } - let metadata = metadata_cache[channel]; - if (!(metadata && typeof(metadata.expire) === 'number')) { return false; } - - // the number of milliseconds ago the channel should have expired - let pastDue = (+new Date()) - metadata.expire; - - // less than zero means that it hasn't expired yet - if (pastDue < 0) { return false; } - - // if it should have expired more than a day ago... 
- // there may have been a problem with scheduling tasks - // or the scheduled tasks may not be running - // so trigger a removal from here - if (pastDue >= ONE_DAY) { expireChannel(ctx, channel); } - - // close the channel - store.closeChannel(channel, function () { - historyKeeperBroadcast(ctx, channel, { - error: 'EEXPIRED', - channel: channel - }); - // remove it from any caches after you've told anyone in the channel - // that it has expired - delete ctx.channels[channel]; - delete metadata_cache[channel]; - }); - - // return true to indicate that it has expired - return true; - }; - - var CHECKPOINT_PATTERN = /^cp\|(([A-Za-z0-9+\/=]+)\|)?/;
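// Worked example of CHECKPOINT_PATTERN (the values here are illustrative):
//   CHECKPOINT_PATTERN.exec('cp|Abc123+/=|["signed payload..."]')
//       => [ 'cp|Abc123+/=|', 'Abc123+/=|', 'Abc123+/=' ]  // match[2] is the checkpoint id
//   CHECKPOINT_PATTERN.exec('cp|["signed payload..."]')
//       => [ 'cp|', undefined, undefined ]                 // checkpoint without an id
//   CHECKPOINT_PATTERN.exec('["regular message..."]')
//       => null                                            // not a checkpoint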
- - /* onChannelMessage - Determine what we should store when a message is broadcast to a channel - - * ignores ephemeral channels - * ignores messages sent to expired channels - * rejects duplicated checkpoints - * validates messages to channels that have validation keys - * caches the id of the last saved checkpoint - * adds timestamps to incoming messages - * writes messages to the store - */ - const onChannelMessage = function (ctx, channel, msgStruct) { - // TODO our usage of 'channel' here looks prone to errors - // we only use it for its 'id', but it can contain other stuff - // also, we're using this RPC from both the RPC and Netflux-server - // we should probably just change this to expect a channel id directly - - // don't store messages if the channel id indicates that it's an ephemeral message - if (!channel.id || channel.id.length === EPHEMERAL_CHANNEL_LENGTH) { return; } - - const isCp = /^cp\|/.test(msgStruct[4]); - let id; - if (isCp) { - // id becomes either null or an array of results... - id = CHECKPOINT_PATTERN.exec(msgStruct[4]); - if (Array.isArray(id) && id[2] && id[2] === channel.lastSavedCp) { - // Reject duplicate checkpoints - return; - } - } - - let metadata; - nThen(function (w) { - // getIndex (and therefore the latest metadata) - getIndex(ctx, channel.id, w(function (err, index) { - if (err) { - w.abort(); - return void Log.error('CHANNEL_MESSAGE_ERROR', err); - } - - if (!index.metadata) { - // if there's no channel metadata then it can't be an expiring channel - // nor can we possibly validate it - return; - } - - metadata = index.metadata; - - // don't write messages to expired channels - if (checkExpired(ctx, channel)) { return void w.abort(); } - - // if there's no validateKey present skip to the next block - if (!metadata.validateKey) { return; } - - // trim the checkpoint indicator off the message if it's present - let signedMsg = (isCp) ? msgStruct[4].replace(CHECKPOINT_PATTERN, '') : msgStruct[4]; - // convert the message from a base64 string into a Uint8Array - - // FIXME this can fail and the client won't notice - signedMsg = Nacl.util.decodeBase64(signedMsg); - - // FIXME this can blow up - // TODO check that that won't cause any problems other than not being able to append... - const validateKey = Nacl.util.decodeBase64(metadata.validateKey); - // validate the message - const validated = Nacl.sign.open(signedMsg, validateKey); - if (!validated) { - // don't go any further if the message fails validation - w.abort(); - Log.info("HK_SIGNED_MESSAGE_REJECTED", 'Channel '+channel.id); - return; - } - })); - }).nThen(function () { - // do checkpoint stuff... - - // 1. get the checkpoint id - // 2. reject duplicate checkpoints - - if (isCp) { - // if the message is a checkpoint we will have already validated - // that it isn't a duplicate. remember its id so that we can - // repeat this process for the next incoming checkpoint - - // WARNING: the fact that we only check the most recent checkpoints - // is a potential source of bugs if one editor has high latency and - // pushes a duplicate of an earlier checkpoint than the latest which - // has been pushed by editors with low latency - // FIXME - if (Array.isArray(id) && id[2]) { - // Store new checkpoint hash - channel.lastSavedCp = id[2]; - } - } - - // add the time to the message - msgStruct.push(now()); - - // storeMessage - storeMessage(ctx, channel, JSON.stringify(msgStruct), isCp, getHash(msgStruct[4])); - }); - }; - - /* dropChannel - * exported as API - * used by chainpad-server/NetfluxWebsocketSrv.js - * cleans up memory structures which are managed entirely by the historyKeeper - * the netflux server manages other memory in ctx.channels - */ - const dropChannel = function (chanName) { - delete metadata_cache[chanName]; - }; - - /* getHistoryOffset - returns a number representing the byte offset from the start of the log - for whatever history you're seeking. - - query by providing a 'lastKnownHash', - which is really just a string of the first 64 characters of an encrypted message. - OR by -1 which indicates that we want the full history (byte offset 0) - OR nothing, which indicates that you want whatever messages the historyKeeper deems relevant - (typically the last few checkpoints) - - this function embeds a lot of the history keeper's logic: - - 0. if you passed -1 as the lastKnownHash it means you want the complete history - * I'm not sure why you'd need to call this function if you know it will return 0 in this case... - * it has a side-effect of filling the index cache if it's empty - 1. if you provided a lastKnownHash and that message does not exist in the history: - * either the client has made a mistake or the history they knew about no longer exists - * call back with EINVAL - 2. if you did not provide a lastKnownHash - * and there are fewer than two checkpoints: - * return 0 (read from the start of the file) - * and there are two or more checkpoints: - * return the offset of the earliest checkpoint which 'sliceCpIndex' considers relevant - 3. if you did provide a lastKnownHash - * read through the log until you find the hash that you're looking for - * call back with either the byte offset of the message that you found OR - * -1 if you didn't find it - - */
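// A worked example of the cases above, assuming a hypothetical cached index:
//   index.cpIndex = [ { offset: 1200, line: 140 },
//                     { offset: 3400, line: 260 } ]
// then:
//   getHistoryOffset(ctx, chan, -1, cb)          calls back with 0    (complete history)
//   getHistoryOffset(ctx, chan, undefined, cb)   calls back with 1200 (the older of the last two checkpoints)
//   getHistoryOffset(ctx, chan, evictedHash, cb) calls back with new Error('EINVAL')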
- const getHistoryOffset = (ctx, channelName, lastKnownHash, cb /*:(e:?Error, os:?number)=>void*/) => { - // lastKnownHash === -1 means we want the complete history - if (lastKnownHash === -1) { return void cb(null, 0); } - let offset = -1; - nThen((waitFor) => { - getIndex(ctx, channelName, waitFor((err, index) => { - if (err) { waitFor.abort(); return void cb(err); } - - // check if the "hash" the client is requesting exists in the index - const lkh = index.offsetByHash[lastKnownHash]; - // we evict old hashes from the index as new checkpoints are discovered. - // if someone connects and asks for a hash that is no longer relevant, - // we tell them it's an invalid request. This is because of the semantics of "GET_HISTORY" - // which is only ever used when connecting or reconnecting in typical uses of history... - // this assumption should hold for uses by chainpad, but perhaps not for other use cases. - // EXCEPT: other cases don't use checkpoints! - // clients that are told that their request is invalid should just make another request - // without specifying the hash, and just trust the server to give them the relevant data. - // QUESTION: does this mean mailboxes are causing the server to store too much stuff in memory? - if (lastKnownHash && typeof(lkh) !== "number") { - waitFor.abort(); - return void cb(new Error('EINVAL')); - } - - // Since last 2 checkpoints - if (!lastKnownHash) { - waitFor.abort(); - // Less than 2 checkpoints in the history: return everything - if (index.cpIndex.length < 2) { return void cb(null, 0); } - // Otherwise return the second-to-last checkpoint's offset - return void cb(null, index.cpIndex[0].offset); - /* LATER... - in practice, two checkpoints can be very close together - we have measures to avoid duplicate checkpoints, but editors - can produce nearby checkpoints which are slightly different, - and slip past these protections. To be really careful, we can - seek past nearby checkpoints by some number of patches so as - to ensure that all editors have sufficient knowledge of history - to reconcile their differences. */ - } - - offset = lkh; - })); - }).nThen((waitFor) => { - // if offset is less than zero then presumably the channel has no messages - // returning falls through to the next block and therefore returns -1 - if (offset !== -1) { return; } - - // do a lookup from the index - // FIXME maybe we don't need this anymore? - // otherwise we have a non-negative offset and we can start to read from there - store.readMessagesBin(channelName, 0, (msgObj, readMore, abort) => { - // tryParse returns a parsed message or undefined - const msg = tryParse(msgObj.buff.toString('utf8')); - // if it was undefined then go on to the next message - if (typeof msg === "undefined") { return readMore(); } - if (typeof(msg[4]) !== 'string' || lastKnownHash !== getHash(msg[4])) { - return void readMore(); - } - offset = msgObj.offset; - abort(); - }, waitFor(function (err) { - if (err) { waitFor.abort(); return void cb(err); } - })); - }).nThen(() => { - cb(null, offset); - }); - };
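// A sketch of the client-side recovery suggested above: if the server answers
// EINVAL, retry without a hash and accept the server's default range.
// 'requestHistory' is a hypothetical helper, not part of this codebase.
var requestHistoryWithFallback = function (network, channel, lastKnownHash, onMessage) {
    requestHistory(network, channel, lastKnownHash, onMessage, function (err) {
        if (err && err.message === 'EINVAL') {
            // our hash was evicted from the index; trust the server's defaults
            requestHistory(network, channel, undefined, onMessage, function () {});
        }
    });
};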
- - /* getHistoryAsync - * finds the appropriate byte offset from which to begin reading using 'getHistoryOffset' - * streams through the rest of the messages, safely parsing them and returning the parsed content to the handler - * calls back when it has reached the end of the log - - Used by: - * GET_HISTORY - - */ - const getHistoryAsync = (ctx, channelName, lastKnownHash, beforeHash, handler, cb) => { - let offset = -1; - nThen((waitFor) => { - getHistoryOffset(ctx, channelName, lastKnownHash, waitFor((err, os) => { - if (err) { - waitFor.abort(); - return void cb(err); - } - offset = os; - })); - }).nThen((waitFor) => { - if (offset === -1) { return void cb(new Error("could not find offset")); } - const start = (beforeHash) ? 0 : offset; - store.readMessagesBin(channelName, start, (msgObj, readMore, abort) => { - if (beforeHash && msgObj.offset >= offset) { return void abort(); } - handler(tryParse(msgObj.buff.toString('utf8')), readMore); - }, waitFor(function (err) { - return void cb(err); - })); - }); - }; - - /* getOlderHistory - * allows clients to query for all messages until a known hash is read - * stores all messages in history as they are read - * can therefore be very expensive for memory - * should probably be converted to a streaming interface - - Used by: - * GET_HISTORY_RANGE - */ - const getOlderHistory = function (channelName, oldestKnownHash, cb) { - var messageBuffer = []; - var found = false; - store.getMessages(channelName, function (msgStr) { - if (found) { return; } - - let parsed = tryParse(msgStr); - if (typeof parsed === "undefined") { return; } - - // identify classic metadata messages by their inclusion of a channel. - // and don't send metadata, since: - // 1. the user won't be interested in it - // 2. this metadata is potentially incomplete/incorrect - if (isMetadataMessage(parsed)) { return; } - - var content = parsed[4]; - if (typeof(content) !== 'string') { return; } - - var hash = getHash(content); - if (hash === oldestKnownHash) { - found = true; - } - messageBuffer.push(parsed); - }, function (err) { - if (err) { - Log.error("HK_GET_OLDER_HISTORY", err); - } - cb(messageBuffer); - }); - }; - - - /* onChannelCleared - * broadcasts to all clients in a channel if that channel is deleted - */ - const onChannelCleared = function (ctx, channel) { - historyKeeperBroadcast(ctx, channel, { - error: 'ECLEARED', - channel: channel - }); - }; - // When a channel is removed from datastore, broadcast a message to all its connected users - const onChannelDeleted = function (ctx, channel) { - store.closeChannel(channel, function () { - historyKeeperBroadcast(ctx, channel, { - error: 'EDELETED', - channel: channel - }); - }); - delete ctx.channels[channel]; - delete metadata_cache[channel]; - }; - // Check if the selected channel is expired - // If it is, remove it from memory and broadcast a message to its members - - const onChannelMetadataChanged = function (ctx, channel, metadata) { - if (channel && metadata_cache[channel] && typeof (metadata) === "object") { - Log.silly('SET_METADATA_CACHE', 'Channel '+ channel +', metadata: '+ JSON.stringify(metadata)); - metadata_cache[channel] = metadata; - if (ctx.channels[channel] && ctx.channels[channel].index) { - ctx.channels[channel].index.metadata = metadata; - } - historyKeeperBroadcast(ctx, channel, metadata); - } - }; - - const handleGetHistory = function (ctx, seq, user, parsed) { - // parsed[1] is the channel id - // parsed[2] is a validation key or an object containing metadata (optional) - // parsed[3] is the last known hash (optional) - sendMsg(ctx, user, [seq, 'ACK']); - var channelName = parsed[1]; - var config = parsed[2]; - var metadata = {}; - var lastKnownHash; - - // clients can optionally pass a map of attributes - // if the channel already exists this map will be ignored - // otherwise it will be stored as the initial metadata state for the channel - if (config && typeof config === "object" && !Array.isArray(parsed[2])) { - lastKnownHash = config.lastKnownHash; - metadata = config.metadata || {}; - if (metadata.expire) { - metadata.expire = +metadata.expire * 1000 + (+new Date()); - } - } - metadata.channel = channelName; - metadata.created = +new Date(); -
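// An illustrative GET_HISTORY request, inferred from the parsing above
// (the field values are made up):
//   ['GET_HISTORY', channelId, {
//       lastKnownHash: 'XwD1...the first 64 characters of a message...',
//       metadata: { validateKey: '...base64...', owners: [ '...' ], expire: 3600 }
//   }]
// 'expire' arrives as a duration in seconds; the handler converts it to an
// absolute timestamp in milliseconds via: +metadata.expire * 1000 + (+new Date())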
- // if the user sends us an invalid key, we won't be able to validate their messages - // so they'll never get written to the log anyway. Let's just drop their message - // on the floor instead of doing a bunch of extra work - // TODO send them an error message so they know something is wrong - if (metadata.validateKey && !isValidValidateKeyString(metadata.validateKey)) { - return void Log.error('HK_INVALID_KEY', metadata.validateKey); - } - - nThen(function (waitFor) { - var w = waitFor(); - - /* unless this is a young channel, we will serve all messages from an offset; - this will not include the channel metadata, so we need to explicitly fetch that. - unfortunately, we can't just serve it blindly, since then young channels will - send the metadata twice, so let's do a quick check of what we're going to serve... - */ - getIndex(ctx, channelName, waitFor((err, index) => { - /* if there's an error here, it should be encountered - and handled by the next nThen block. - so, let's just fall through... - */ - if (err) { return w(); } - - - // it's possible that the channel doesn't have metadata - // but in that case there's no point in checking if the channel expired - // or in trying to send metadata, so just skip this block - if (!index || !index.metadata) { return void w(); } - // And then check if the channel is expired. If it is, send the error and abort - // FIXME this is hard to read because 'checkExpired' has side effects - if (checkExpired(ctx, channelName)) { return void waitFor.abort(); } - // always send metadata with GET_HISTORY requests - sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify(index.metadata)], w); - })); - }).nThen(() => { - let msgCount = 0; - - // TODO compute lastKnownHash in a manner such that it will always skip past the metadata line? - getHistoryAsync(ctx, channelName, lastKnownHash, false, (msg, readMore) => { - if (!msg) { return; } - msgCount++; - // avoid sending the metadata message a second time - if (isMetadataMessage(msg) && metadata_cache[channelName]) { return readMore(); } - sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify(msg)], readMore); - }, (err) => { - if (err && err.code !== 'ENOENT') { - if (err.message !== 'EINVAL') { Log.error("HK_GET_HISTORY", err); } - const parsedMsg = {error:err.message, channel: channelName}; - sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify(parsedMsg)]); - return; - } - - const chan = ctx.channels[channelName]; - - if (msgCount === 0 && !metadata_cache[channelName] && chan && chan.indexOf(user) > -1) { - metadata_cache[channelName] = metadata; - - // the index will have already been constructed and cached at this point - // but it will not have detected any metadata because it hasn't been written yet - // this means that the cache starts off as invalid, so we have to correct it - if (chan && chan.index) { chan.index.metadata = metadata; } - - // new channels will always have their metadata written to a dedicated metadata log - // but any lines after the first which are not amendments in a particular format will be ignored. - // Thus we should be safe from race conditions here if we just write metadata to the log as below... - // TODO validate this logic - // otherwise maybe we need to check that the metadata log is empty as well - store.writeMetadata(channelName, JSON.stringify(metadata), function (err) { - if (err) { - // FIXME tell the user that there was a channel error? - return void Log.error('HK_WRITE_METADATA', { - channel: channelName, - error: err, - }); - } - }); - - // write tasks - if(tasks && metadata.expire && typeof(metadata.expire) === 'number') { - // the fun part... - // the user has said they want this pad to expire at some point - tasks.write(metadata.expire, "EXPIRE", [ channelName ], function (err) { - if (err) { - // if there is an error, we don't want to crash the whole server... - // just log it, and if there's a problem you'll be able to fix it - // at a later date with the provided information - Log.error('HK_CREATE_EXPIRE_TASK', err); - Log.info('HK_INVALID_EXPIRE_TASK', JSON.stringify([metadata.expire, 'EXPIRE', channelName])); - } - }); - } - sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify(metadata)]); - } - - // End of history message: - let parsedMsg = {state: 1, channel: channelName}; - sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify(parsedMsg)]); - }); - }); - };
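// An illustrative GET_HISTORY_RANGE request, inferred from the handler below
// (the field values are made up):
//   ['GET_HISTORY_RANGE', channelId, {
//       from: '...oldest known hash...',  // optional starting point
//       count: 100,     // desired number of messages, OR
//       cpCount: 2,     // desired number of checkpoints
//       txid: 'a1b2c3'  // client-chosen id used to tag the streamed response
//   }]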
- - const handleGetHistoryRange = function (ctx, seq, user, parsed) { - var channelName = parsed[1]; - var map = parsed[2]; - if (!(map && typeof(map) === 'object')) { - return void sendMsg(ctx, user, [seq, 'ERROR', 'INVALID_ARGS', HISTORY_KEEPER_ID]); - } - - var oldestKnownHash = map.from; - var desiredMessages = map.count; - var desiredCheckpoint = map.cpCount; - var txid = map.txid; - if (typeof(desiredMessages) !== 'number' && typeof(desiredCheckpoint) !== 'number') { - return void sendMsg(ctx, user, [seq, 'ERROR', 'UNSPECIFIED_COUNT', HISTORY_KEEPER_ID]); - } - - if (!txid) { - return void sendMsg(ctx, user, [seq, 'ERROR', 'NO_TXID', HISTORY_KEEPER_ID]); - } - - sendMsg(ctx, user, [seq, 'ACK']); - return void getOlderHistory(channelName, oldestKnownHash, function (messages) { - var toSend = []; - if (typeof (desiredMessages) === "number") { - toSend = messages.slice(-desiredMessages); - } else { - let cpCount = 0; - for (var i = messages.length - 1; i >= 0; i--) { - if (/^cp\|/.test(messages[i][4]) && i !== (messages.length - 1)) { - cpCount++; - } - toSend.unshift(messages[i]); - if (cpCount >= desiredCheckpoint) { break; } - } - } - toSend.forEach(function (msg) { - sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, - JSON.stringify(['HISTORY_RANGE', txid, msg])]); - }); - - sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, - JSON.stringify(['HISTORY_RANGE_END', txid, channelName]) - ]); - }); - }; - - const handleGetFullHistory = function (ctx, seq, user, parsed) { - // parsed[1] is the channel id - // parsed[2] is a validation key (optional) - // parsed[3] is the last known hash (optional) - sendMsg(ctx, user, [seq, 'ACK']); - - // FIXME should we send metadata here too? - // none of the clientside code which uses this API needs metadata, but it won't hurt to send it (2019-08-22) - return void getHistoryAsync(ctx, parsed[1], -1, false, (msg, readMore) => { - if (!msg) { return; } - sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify(['FULL_HISTORY', msg])], readMore); - }, (err) => { - let parsedMsg = ['FULL_HISTORY_END', parsed[1]]; - if (err) { - Log.error('HK_GET_FULL_HISTORY', err.stack); - parsedMsg = ['ERROR', parsed[1], err.message]; - } - sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify(parsedMsg)]); - }); - };
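// Every message the history keeper pushes to a client uses the same envelope,
// visible throughout this file:
//   [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify(payload)]
// where payload is metadata, a history message, a sentinel like {state: 1},
// or an error like {error: 'EINVAL', channel: ...}. A hedged helper capturing
// that convention (not present in the original) would be:
//   var hkSendMsg = function (ctx, user, payload) {
//       sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify(payload)]);
//   };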
- - const handleRPC = function (ctx, seq, user, parsed) { - if (typeof(rpc) !== 'function') { return; } - - /* RPC Calls... */ - var rpc_call = parsed.slice(1); - - sendMsg(ctx, user, [seq, 'ACK']); - try { - // slice off the sequence number and pass in the rest of the message - rpc(ctx, rpc_call, function (err, output) { - if (err) { - sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify([parsed[0], 'ERROR', err])]); - return; - } - var msg = rpc_call[0].slice(); - if (msg[3] === 'REMOVE_OWNED_CHANNEL') { - onChannelDeleted(ctx, msg[4]); - } - if (msg[3] === 'CLEAR_OWNED_CHANNEL') { - onChannelCleared(ctx, msg[4]); - } - - if (msg[3] === 'SET_METADATA') { // or whatever we call the RPC???? - // make sure we update our cache of metadata - // or at least invalidate it and force other mechanisms to recompute its state - // 'output' could be the new state as computed by rpc - onChannelMetadataChanged(ctx, msg[4].channel, output[1]); - } - - // unauthenticated RPC calls have a different message format - if (msg[0] === "WRITE_PRIVATE_MESSAGE" && output && output.channel) { - // this is an inline reimplementation of historyKeeperBroadcast - // because if we use that directly it will bypass signature validation - // which opens up the user to malicious behaviour - let chan = ctx.channels[output.channel]; - if (chan && chan.length) { - chan.forEach(function (user) { - sendMsg(ctx, user, output.message); - //[0, null, 'MSG', user.id, JSON.stringify(output.message)]); - }); - } - // rpc and anonRpc expect their responses to be of a certain length - // and we've already used the output of the rpc call, so overwrite it - output = [null, null, null]; - } - - // finally, send a response to the client that sent the RPC - sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify([parsed[0]].concat(output))]); - }); - } catch (e) { - sendMsg(ctx, user, [0, HISTORY_KEEPER_ID, 'MSG', user.id, JSON.stringify([parsed[0], 'ERROR', 'SERVER_ERROR'])]); - } - }; - - /* onDirectMessage - * exported for use by the netflux-server - * parses and handles all direct messages directed to the history keeper - * checks if the channel is expired and executes all the associated side-effects - * routes queries to the appropriate handlers - */ - const onDirectMessage = function (ctx, seq, user, json) { - Log.silly('HK_MESSAGE', json); - - let parsed; - try { - parsed = JSON.parse(json[2]); - } catch (err) { - Log.error("HK_PARSE_CLIENT_MESSAGE", json); - return; - } - - // If the requested history is for an expired channel, abort - // Note that if we don't have the keys for that channel in metadata_cache, we'll - // have to abort later (once we know the expiration time) - if (checkExpired(ctx, parsed[1])) { return; } - - if (parsed[0] === 'GET_HISTORY') { - return void handleGetHistory(ctx, seq, user, parsed); - } - if (parsed[0] === 'GET_HISTORY_RANGE') { - return void handleGetHistoryRange(ctx, seq, user, parsed); - } - if (parsed[0] === 'GET_FULL_HISTORY') { - return void handleGetFullHistory(ctx, seq, user, parsed); - } - return void handleRPC(ctx, seq, user, parsed); - }; - - return { - id: HISTORY_KEEPER_ID, - setConfig: setConfig, - onChannelMessage: onChannelMessage, - dropChannel: dropChannel, - checkExpired: checkExpired, - onDirectMessage: onDirectMessage, - }; -}; - -}()); diff --git a/import b/import deleted file mode 100755 index 1bf1d5de9..000000000 --- a/import +++ /dev/null @@ -1,65 +0,0 @@ -#!/usr/bin/env node -/* globals process */ - -var Config = require("./config"); -var Fs = require("fs"); -var Storage = require(Config.storage); - -var args = process.argv.slice(2); - -if (!args.length) { - 
console.log("Insufficient arguments!"); - console.log("Pass a path to a database backup!"); - process.exit(); -} - -var dump = Fs.readFileSync(args[0], 'utf-8'); - -var ready = function (store) { - var lock = 0; - dump.split(/\n/) - .filter(function (line) { - return line; - }) - .forEach(function (line, i) { - lock++; - var parts; - - var channel; - var msg; - - line.replace(/^(.*?)\|(.*)$/, function (all, c, m) { - channel = c; - msg = m; - return ''; - }); - - if (!channel || !msg) { - console.log("BAD LINE on line %s", i); - return; - } - - try { - JSON.parse(msg); - } catch (err) { - console.log("BAD LINE on line %s", i); - console.log(msg); - console.log(); - } - - store.message(channel, msg, function () { - console.log(line); - lock--; - if (!lock) { - console.log("DONE"); - process.exit(0); - } - }); - }); -}; - -Storage.create(Config, function (store) { - console.log("READY"); - ready(store); -}); - diff --git a/lib/api.js b/lib/api.js new file mode 100644 index 000000000..8e6725039 --- /dev/null +++ b/lib/api.js @@ -0,0 +1,34 @@ +/* jshint esversion: 6 */ +const WebSocketServer = require('ws').Server; +const NetfluxSrv = require('chainpad-server'); + +module.exports.create = function (config) { + // asynchronously create a historyKeeper and RPC together + require('./historyKeeper.js').create(config, function (err, historyKeeper) { + if (err) { throw err; } + + var log = config.log; + + // spawn ws server and attach netflux event handlers + NetfluxSrv.create(new WebSocketServer({ server: config.httpServer})) + .on('channelClose', historyKeeper.channelClose) + .on('channelMessage', historyKeeper.channelMessage) + .on('channelOpen', historyKeeper.channelOpen) + .on('sessionClose', historyKeeper.sessionClose) + .on('error', function (error, label, info) { + if (!error) { return; } + /* labels: + SEND_MESSAGE_FAIL, SEND_MESSAGE_FAIL_2, FAIL_TO_DISCONNECT, + FAIL_TO_TERMINATE, HANDLE_CHANNEL_LEAVE, NETFLUX_BAD_MESSAGE, + NETFLUX_WEBSOCKET_ERROR + */ + log.error(label, { + code: error.code, + message: error.message, + stack: error.stack, + info: info, + }); + }) + .register(historyKeeper.id, historyKeeper.directMessage); + }); +}; diff --git a/lib/commands/admin-rpc.js b/lib/commands/admin-rpc.js new file mode 100644 index 000000000..a7cb59798 --- /dev/null +++ b/lib/commands/admin-rpc.js @@ -0,0 +1,160 @@ +/*jshint esversion: 6 */ +const nThen = require("nthen"); +const getFolderSize = require("get-folder-size"); +const Util = require("../common-util"); +const Ulimit = require("ulimit"); + +var Fs = require("fs"); + +var Admin = module.exports; + +var getFileDescriptorCount = function (Env, server, cb) { + Fs.readdir('/proc/self/fd', function(err, list) { + if (err) { return void cb(err); } + cb(void 0, list.length); + }); +}; + +var getFileDescriptorLimit = function (env, server, cb) { + Ulimit(cb); +}; + +var getActiveSessions = function (Env, Server, cb) { + var stats = Server.getSessionStats(); + cb(void 0, [ + stats.total, + stats.unique + ]); +}; + +var shutdown = function (Env, Server, cb) { + if (true) { + return void cb('E_NOT_IMPLEMENTED'); + } + + // disconnect all users and reject new connections + Server.shutdown(); + + // stop all intervals that may be running + Object.keys(Env.intervals).forEach(function (name) { + clearInterval(Env.intervals[name]); + }); + + // set a flag to prevent incoming database writes + // wait until all pending writes are complete + // then process.exit(0); + // and allow system functionality to restart the server +}; + +var getRegisteredUsers = 
function (Env, Server, cb) { + Env.batchRegisteredUsers('', cb, function (done) { + var dir = Env.paths.pin; + var folders; + var users = 0; + nThen(function (waitFor) { + Fs.readdir(dir, waitFor(function (err, list) { + if (err) { + waitFor.abort(); + return void done(err); + } + folders = list; + })); + }).nThen(function (waitFor) { + folders.forEach(function (f) { + var dir = Env.paths.pin + '/' + f; + Fs.readdir(dir, waitFor(function (err, list) { + if (err) { return; } + users += list.length; + })); + }); + }).nThen(function () { + done(void 0, users); + }); + }); +}; + +var getDiskUsage = function (Env, Server, cb) { + Env.batchDiskUsage('', cb, function (done) { + var data = {}; + nThen(function (waitFor) { + getFolderSize('./', waitFor(function(err, info) { + data.total = info; + })); + getFolderSize(Env.paths.pin, waitFor(function(err, info) { + data.pin = info; + })); + getFolderSize(Env.paths.blob, waitFor(function(err, info) { + data.blob = info; + })); + getFolderSize(Env.paths.staging, waitFor(function(err, info) { + data.blobstage = info; + })); + getFolderSize(Env.paths.block, waitFor(function(err, info) { + data.block = info; + })); + getFolderSize(Env.paths.data, waitFor(function(err, info) { + data.datastore = info; + })); + }).nThen(function () { + done(void 0, data); + }); + }); +}; + +var getActiveChannelCount = function (Env, Server, cb) { + cb(void 0, Server.getActiveChannelCount()); +}; + +var flushCache = function (Env, Server, cb) { + Env.flushCache(); + cb(void 0, true); +}; + + +// CryptPad_AsyncStore.rpc.send('ADMIN', ['SET_DEFAULT_STORAGE_LIMIT', 1024 * 1024 * 1024 /* 1GB */], console.log) +var setDefaultStorageLimit = function (Env, Server, cb, data) { + var value = Array.isArray(data) && data[1]; + if (typeof(value) !== 'number' || value <= 0) { return void cb('EINVAL'); } + var previous = Env.defaultStorageLimit; + var change = { + previous: previous, + current: value, + }; + + Env.defaultStorageLimit = value; + Env.Log.info('DEFAULT_STORAGE_LIMIT_UPDATE', change); + + cb(void 0, change); +}; + +var commands = { + ACTIVE_SESSIONS: getActiveSessions, + ACTIVE_PADS: getActiveChannelCount, + REGISTERED_USERS: getRegisteredUsers, + DISK_USAGE: getDiskUsage, + FLUSH_CACHE: flushCache, + SHUTDOWN: shutdown, + GET_FILE_DESCRIPTOR_COUNT: getFileDescriptorCount, + GET_FILE_DESCRIPTOR_LIMIT: getFileDescriptorLimit, + SET_DEFAULT_STORAGE_LIMIT: setDefaultStorageLimit, +}; + +Admin.command = function (Env, safeKey, data, _cb, Server) { + var cb = Util.once(Util.mkAsync(_cb)); + + var admins = Env.admins; + //var unsafeKey = Util.unescapeKeyCharacters(safeKey); + if (admins.indexOf(safeKey) === -1) { + return void cb("FORBIDDEN"); + } + + var command = commands[data[0]]; + + if (typeof(command) === 'function') { + return void command(Env, Server, cb, data); + } + + return void cb('UNHANDLED_ADMIN_COMMAND'); +}; + + diff --git a/lib/commands/block.js b/lib/commands/block.js new file mode 100644 index 000000000..3a264c167 --- /dev/null +++ b/lib/commands/block.js @@ -0,0 +1,172 @@ +/*jshint esversion: 6 */ +/* globals Buffer*/ +var Block = module.exports; + +const Fs = require("fs"); +const Fse = require("fs-extra"); +const Path = require("path"); +const Nacl = require("tweetnacl/nacl-fast"); +const nThen = require("nthen"); + +const Util = require("../common-util"); + +/* + We assume that the server is secured against MitM attacks + via HTTPS, and that malicious actors do not have code execution + capabilities. If they do, we have much more serious problems. 
+ + The capability to replay a block write or remove results in either + a denial of service for the user whose block was removed, or in the + case of a write, a rollback to an earlier password. + + Since block modification is destructive, this can result in loss + of access to the user's drive. + + So long as the detached signature is never observed by a malicious + party, and the server discards it after proof of knowledge, replays + are not possible. However, this precludes verification of the signature + at a later time. + + Despite this, an integrity check is still possible by the original + author of the block, since we assume that the block will have been + encrypted with xsalsa20-poly1305 which is authenticated. +*/ +var validateLoginBlock = function (Env, publicKey, signature, block, cb) { // FIXME BLOCKS + // convert the public key to a Uint8Array and validate it + if (typeof(publicKey) !== 'string') { return void cb('E_INVALID_KEY'); } + + var u8_public_key; + try { + u8_public_key = Nacl.util.decodeBase64(publicKey); + } catch (e) { + return void cb('E_INVALID_KEY'); + } + + var u8_signature; + try { + u8_signature = Nacl.util.decodeBase64(signature); + } catch (e) { + Env.Log.error('INVALID_BLOCK_SIGNATURE', e); + return void cb('E_INVALID_SIGNATURE'); + } + + // convert the block to a Uint8Array + var u8_block; + try { + u8_block = Nacl.util.decodeBase64(block); + } catch (e) { + return void cb('E_INVALID_BLOCK'); + } + + // take its hash + var hash = Nacl.hash(u8_block); + + // validate the signature against the hash of the content + var verified = Nacl.sign.detached.verify(hash, u8_signature, u8_public_key); + + // existing authentication ensures that users cannot replay old blocks + + // call back with (err) if unsuccessful + if (!verified) { return void cb("E_COULD_NOT_VERIFY"); } + + return void cb(null, u8_block); +}; + +var createLoginBlockPath = function (Env, publicKey) { // FIXME BLOCKS + // prepare publicKey to be used as a file name + var safeKey = Util.escapeKeyCharacters(publicKey); + + // validate safeKey + if (typeof(safeKey) !== 'string') { + return; + } + + // derive the full path + // /home/cryptpad/cryptpad/block/fg/fg32kefksjdgjkewrjksdfksjdfsdfskdjfsfd + return Path.join(Env.paths.block, safeKey.slice(0, 2), safeKey); +}; + +Block.writeLoginBlock = function (Env, safeKey, msg, cb) { // FIXME BLOCKS + //console.log(msg); + var publicKey = msg[0]; + var signature = msg[1]; + var block = msg[2]; + + validateLoginBlock(Env, publicKey, signature, block, function (e, validatedBlock) { + if (e) { return void cb(e); } + if (!(validatedBlock instanceof Uint8Array)) { return void cb('E_INVALID_BLOCK'); } + + // derive the filepath + var path = createLoginBlockPath(Env, publicKey); + + // make sure the path is valid + if (typeof(path) !== 'string') { + return void cb('E_INVALID_BLOCK_PATH'); + } + + var parsed = Path.parse(path); + if (!parsed || typeof(parsed.dir) !== 'string') { + return void cb("E_INVALID_BLOCK_PATH_2"); + } + + nThen(function (w) { + // make sure the path to the file exists + Fse.mkdirp(parsed.dir, w(function (e) { + if (e) { + w.abort(); + cb(e); + } + })); + }).nThen(function () { + // actually write the block + + // flow is dumb and I need to guard against this which will never happen + /*:: if (typeof(validatedBlock) === 'undefined') { throw new Error('should never happen'); } */ + /*:: if (typeof(path) === 'undefined') { throw new Error('should never happen'); } */ + Fs.writeFile(path, Buffer.from(validatedBlock), { encoding: "binary", }, 
function (err) { + if (err) { return void cb(err); } + cb(); + }); + }); + }); +}; + +/* + When users write a block, they upload the block, and provide + a signature proving that they deserve to be able to write to + the location determined by the public key. + + When removing a block, there is nothing to upload, but we need + to sign something. Since the signature is considered sensitive + information, we can just sign some constant and use that as proof. + +*/ +Block.removeLoginBlock = function (Env, safeKey, msg, cb) { // FIXME BLOCKS + var publicKey = msg[0]; + var signature = msg[1]; + var block = Nacl.util.decodeUTF8('DELETE_BLOCK'); // clients and the server will have to agree on this constant + + validateLoginBlock(Env, publicKey, signature, block, function (e /*::, validatedBlock */) { + if (e) { return void cb(e); } + // derive the filepath + var path = createLoginBlockPath(Env, publicKey); + + // make sure the path is valid + if (typeof(path) !== 'string') { + return void cb('E_INVALID_BLOCK_PATH'); + } + + // FIXME COLDSTORAGE + Fs.unlink(path, function (err) { + Env.Log.info('DELETION_BLOCK_BY_OWNER_RPC', { + publicKey: publicKey, + path: path, + status: err? String(err): 'SUCCESS', + }); + + if (err) { return void cb(err); } + cb(); + }); + }); +}; + diff --git a/lib/commands/channel.js b/lib/commands/channel.js new file mode 100644 index 000000000..10131d9d8 --- /dev/null +++ b/lib/commands/channel.js @@ -0,0 +1,301 @@ +/*jshint esversion: 6 */ +const Channel = module.exports; + +const Util = require("../common-util"); +const nThen = require("nthen"); +const Core = require("./core"); +const Metadata = require("./metadata"); +const HK = require("../hk-util"); + +Channel.clearOwnedChannel = function (Env, safeKey, channelId, cb, Server) { + if (typeof(channelId) !== 'string' || channelId.length !== 32) { + return cb('INVALID_ARGUMENTS'); + } + var unsafeKey = Util.unescapeKeyCharacters(safeKey); + + Metadata.getMetadata(Env, channelId, function (err, metadata) { + if (err) { return void cb(err); } + if (!Core.hasOwners(metadata)) { return void cb('E_NO_OWNERS'); } + // Confirm that the channel is owned by the user in question + if (!Core.isOwner(metadata, unsafeKey)) { + return void cb('INSUFFICIENT_PERMISSIONS'); + } + return void Env.msgStore.clearChannel(channelId, function (e) { + if (e) { return void cb(e); } + cb(); + + const channel_cache = Env.channel_cache; + + const clear = function () { + // delete the channel cache because it will have been invalidated + delete channel_cache[channelId]; + }; + + nThen(function (w) { + Server.getChannelUserList(channelId).forEach(function (userId) { + Server.send(userId, [ + 0, + Env.historyKeeper.id, + 'MSG', + userId, + JSON.stringify({ + error: 'ECLEARED', + channel: channelId + }) + ], w()); + }); + }).nThen(function () { + clear(); + }).orTimeout(function () { + Env.Log.warn("ON_CHANNEL_CLEARED_TIMEOUT", channelId); + clear(); + }, 30000); + }); + }); +}; + +Channel.removeOwnedChannel = function (Env, safeKey, channelId, cb, Server) { + if (typeof(channelId) !== 'string' || !Core.isValidId(channelId)) { + return cb('INVALID_ARGUMENTS'); + } + var unsafeKey = Util.unescapeKeyCharacters(safeKey); + + if (Env.blobStore.isFileId(channelId)) { + var blobId = channelId; + + return void nThen(function (w) { + // check if you have permissions + Env.blobStore.isOwnedBy(safeKey, blobId, w(function (err, owned) { + if (err || !owned) { + w.abort(); + return void cb("INSUFFICIENT_PERMISSIONS"); + } + })); + }).nThen(function (w) { + // 
remove the blob + return void Env.blobStore.archive.blob(blobId, w(function (err) { + Env.Log.info('ARCHIVAL_OWNED_FILE_BY_OWNER_RPC', { + safeKey: safeKey, + blobId: blobId, + status: err? String(err): 'SUCCESS', + }); + if (err) { + w.abort(); + return void cb(err); + } + })); + }).nThen(function () { + // archive the proof + return void Env.blobStore.archive.proof(safeKey, blobId, function (err) { + Env.Log.info("ARCHIVAL_PROOF_REMOVAL_BY_OWNER_RPC", { + safeKey: safeKey, + blobId: blobId, + status: err? String(err): 'SUCCESS', + }); + if (err) { + return void cb("E_PROOF_REMOVAL"); + } + cb(void 0, 'OK'); + }); + }); + } + + Metadata.getMetadata(Env, channelId, function (err, metadata) { + if (err) { return void cb(err); } + if (!Core.hasOwners(metadata)) { return void cb('E_NO_OWNERS'); } + if (!Core.isOwner(metadata, unsafeKey)) { + return void cb('INSUFFICIENT_PERMISSIONS'); + } + // temporarily archive the file + return void Env.msgStore.archiveChannel(channelId, function (e) { + Env.Log.info('ARCHIVAL_CHANNEL_BY_OWNER_RPC', { + unsafeKey: unsafeKey, + channelId: channelId, + status: e? String(e): 'SUCCESS', + }); + if (e) { + return void cb(e); + } + cb(void 0, 'OK'); + + const channel_cache = Env.channel_cache; + const metadata_cache = Env.metadata_cache; + + const clear = function () { + delete channel_cache[channelId]; + Server.clearChannel(channelId); + delete metadata_cache[channelId]; + }; + + // an owner of a channel deleted it + nThen(function (w) { + // close the channel in the store + Env.msgStore.closeChannel(channelId, w()); + }).nThen(function (w) { + // Server.channelBroadcast would be better + // but we can't trust it to track even one callback, + // let alone many in parallel. + // so we simulate it on this side to avoid race conditions + Server.getChannelUserList(channelId).forEach(function (userId) { + Server.send(userId, [ + 0, + Env.historyKeeper.id, + "MSG", + userId, + JSON.stringify({ + error: 'EDELETED', + channel: channelId, + }) + ], w()); + }); + }).nThen(function () { + // clear the channel's data from memory + // once you've sent everyone a notice that the channel has been deleted + clear(); + }).orTimeout(function () { + Env.Log.warn('ON_CHANNEL_DELETED_TIMEOUT', channelId); + clear(); + }, 30000); + }); + }); +}; + +Channel.trimHistory = function (Env, safeKey, data, cb) { + if (!(data && typeof(data.channel) === 'string' && typeof(data.hash) === 'string' && data.hash.length === 64)) { + return void cb('INVALID_ARGS'); + } + + var channelId = data.channel; + var unsafeKey = Util.unescapeKeyCharacters(safeKey); + var hash = data.hash; + + nThen(function (w) { + Metadata.getMetadata(Env, channelId, w(function (err, metadata) { + if (err) { return void cb(err); } + if (!Core.hasOwners(metadata)) { + w.abort(); + return void cb('E_NO_OWNERS'); + } + if (!Core.isOwner(metadata, unsafeKey)) { + w.abort(); + return void cb("INSUFFICIENT_PERMISSIONS"); + } + // else fall through to the next block + })); + }).nThen(function () { + Env.msgStore.trimChannel(channelId, hash, function (err) { + if (err) { return void cb(err); } + // clear historyKeeper's cache for this channel + Env.historyKeeper.channelClose(channelId); + cb(void 0, 'OK'); + delete Env.channel_cache[channelId]; + delete Env.metadata_cache[channelId]; + }); + }); +}; + +var ARRAY_LINE = /^\[/; + +/* Files can contain metadata but not content + call back with true if the channel log has no content other than metadata + otherwise false +*/ +Channel.isNewChannel = function (Env, channel, cb) { + if 
(!Core.isValidId(channel)) { return void cb('INVALID_CHAN'); } + if (channel.length !== 32) { return void cb('INVALID_CHAN'); } + + // TODO replace with readMessagesBin + var done = false; + Env.msgStore.getMessages(channel, function (msg) { + if (done) { return; } + try { + if (typeof(msg) === 'string' && ARRAY_LINE.test(msg)) { + done = true; + return void cb(void 0, false); + } + } catch (e) { + Env.WARN('invalid message read from store', e); + } + }, function () { + if (done) { return; } + // no more messages... + cb(void 0, true); + }); +}; + +/* writePrivateMessage + allows users to anonymously send a message to the channel + prevents their netflux-id from being stored in history + and from being broadcast to anyone that might currently be in the channel + + Otherwise behaves the same as sending to a channel +*/ +Channel.writePrivateMessage = function (Env, args, _cb, Server, netfluxId) { + var cb = Util.once(Util.mkAsync(_cb)); + + var channelId = args[0]; + var msg = args[1]; + + // don't bother handling empty messages + if (!msg) { return void cb("INVALID_MESSAGE"); } + + // don't support anything except regular channels + if (!Core.isValidId(channelId) || channelId.length !== 32) { + return void cb("INVALID_CHAN"); + } + + // We expect a modern netflux-websocket-server instance + // if this API isn't here everything will fall apart anyway + if (!(Server && typeof(Server.send) === 'function')) { + return void cb("NOT_IMPLEMENTED"); + } + + nThen(function (w) { + Metadata.getMetadataRaw(Env, channelId, w(function (err, metadata) { + if (err) { + w.abort(); + Env.Log.error('HK_WRITE_PRIVATE_MESSAGE', err); + return void cb('METADATA_ERR'); + } + + if (!metadata || !metadata.restricted) { + return; + } + + var session = HK.getNetfluxSession(Env, netfluxId); + var allowed = HK.listAllowedUsers(metadata); + + if (HK.isUserSessionAllowed(allowed, session)) { return; } + + w.abort(); + cb('INSUFFICIENT_PERMISSIONS'); + })); + }).nThen(function () { + // historyKeeper expects something with an 'id' attribute + // it will fail unless you provide it, but it doesn't need anything else + var channelStruct = { + id: channelId, + }; + + // construct a message to store and broadcast + var fullMessage = [ + 0, // idk + null, // normally the netflux id, null isn't rejected, and it distinguishes messages written in this way + "MSG", // indicate that this is a MSG + channelId, // channel id + msg // the actual message content. Generally a string + ]; + + + // historyKeeper already knows how to handle metadata and message validation, so we just pass it off here + // if the message isn't valid it won't be stored. 
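+        // for illustration, a private message posted to channel "abc...xyz"
+        // is stored and broadcast as:
+        //     [0, null, "MSG", "abc...xyz", "<encrypted payload>"]
+        // whereas a normal client message carries the sender's netflux-id
+        // where this one has null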
+        Env.historyKeeper.channelMessage(Server, channelStruct, fullMessage);
+
+        Server.getChannelUserList(channelId).forEach(function (userId) {
+            Server.send(userId, fullMessage);
+        });
+
+        cb();
+    });
+};
+
diff --git a/lib/commands/core.js b/lib/commands/core.js
new file mode 100644
index 000000000..d7add69b4
--- /dev/null
+++ b/lib/commands/core.js
@@ -0,0 +1,190 @@
+/*jshint esversion: 6 */
+/* globals process */
+const Core = module.exports;
+const Util = require("../common-util");
+const escapeKeyCharacters = Util.escapeKeyCharacters;
+
+/* Use Nacl for checking signatures of messages */
+const Nacl = require("tweetnacl/nacl-fast");
+
+
+Core.DEFAULT_LIMIT = 50 * 1024 * 1024;
+Core.SESSION_EXPIRATION_TIME = 60 * 1000;
+
+Core.isValidId = function (chan) {
+    return chan && chan.length && /^[a-zA-Z0-9=+-]*$/.test(chan) &&
+        [32, 48].indexOf(chan.length) > -1;
+};
+
+var makeToken = Core.makeToken = function () {
+    return Number(Math.floor(Math.random() * Number.MAX_SAFE_INTEGER))
+        .toString(16);
+};
+
+Core.makeCookie = function (token) {
+    var time = (+new Date());
+    time -= time % 5000;
+
+    return [
+        time,
+        process.pid,
+        token
+    ];
+};
+
+var parseCookie = function (cookie) {
+    if (!(cookie && cookie.split)) { return null; }
+
+    var parts = cookie.split('|');
+    if (parts.length !== 3) { return null; }
+
+    var c = {};
+    // the timestamp is serialized as a decimal string of milliseconds,
+    // so cast it back to a number; `new Date(parts[0])` would try to parse
+    // it as a date string and yield an Invalid Date
+    c.time = Number(parts[0]);
+    c.pid = Number(parts[1]);
+    c.seq = parts[2];
+    return c;
+};
+
+Core.getSession = function (Sessions, key) {
+    var safeKey = escapeKeyCharacters(key);
+    if (Sessions[safeKey]) {
+        Sessions[safeKey].atime = +new Date();
+        return Sessions[safeKey];
+    }
+    var user = Sessions[safeKey] = {};
+    user.atime = +new Date();
+    user.tokens = [
+        makeToken()
+    ];
+    return user;
+};
+
+Core.expireSession = function (Sessions, safeKey) {
+    var session = Sessions[safeKey];
+    if (!session) { return; }
+    if (session.blobstage) {
+        session.blobstage.close();
+    }
+    delete Sessions[safeKey];
+};
+
+Core.expireSessionAsync = function (Env, safeKey, cb) {
+    setTimeout(function () {
+        Core.expireSession(Env.Sessions, safeKey);
+        cb(void 0, 'OK');
+    });
+};
+
+var isTooOld = function (time, now) {
+    return (now - time) > 300000;
+};
+
+Core.expireSessions = function (Sessions) {
+    var now = +new Date();
+    Object.keys(Sessions).forEach(function (safeKey) {
+        var session = Sessions[safeKey];
+        if (session && isTooOld(session.atime, now)) {
+            Core.expireSession(Sessions, safeKey);
+        }
+    });
+};
+
+var addTokenForKey = function (Sessions, publicKey, token) {
+    if (!Sessions[publicKey]) { throw new Error('undefined user'); }
+
+    var user = Core.getSession(Sessions, publicKey);
+    user.tokens.push(token);
+    user.atime = +new Date();
+    if (user.tokens.length > 2) { user.tokens.shift(); }
+};
+
+Core.isValidCookie = function (Sessions, publicKey, cookie) {
+    var parsed = parseCookie(cookie);
+    if (!parsed) { return false; }
+
+    var now = +new Date();
+
+    if (!parsed.time) { return false; }
+    if (isTooOld(parsed.time, now)) {
+        return false;
+    }
+
+    // different process.
try harder + if (process.pid !== parsed.pid) { + return false; + } + + var user = Core.getSession(Sessions, publicKey); + if (!user) { return false; } + + var idx = user.tokens.indexOf(parsed.seq); + if (idx === -1) { return false; } + + if (idx > 0) { + // make a new token + addTokenForKey(Sessions, publicKey, Core.makeToken()); + } + + return true; +}; + +Core.checkSignature = function (Env, signedMsg, signature, publicKey) { + if (!(signedMsg && publicKey)) { return false; } + + var signedBuffer; + var pubBuffer; + var signatureBuffer; + + try { + signedBuffer = Nacl.util.decodeUTF8(signedMsg); + } catch (e) { + Env.Log.error('INVALID_SIGNED_BUFFER', signedMsg); + return null; + } + + try { + pubBuffer = Nacl.util.decodeBase64(publicKey); + } catch (e) { + return false; + } + + try { + signatureBuffer = Nacl.util.decodeBase64(signature); + } catch (e) { + return false; + } + + if (pubBuffer.length !== 32) { + Env.Log.error('PUBLIC_KEY_LENGTH', publicKey); + return false; + } + + if (signatureBuffer.length !== 64) { + return false; + } + + return Nacl.sign.detached.verify(signedBuffer, signatureBuffer, pubBuffer); +}; + +// E_NO_OWNERS +Core.hasOwners = function (metadata) { + return Boolean(metadata && Array.isArray(metadata.owners)); +}; + +Core.hasPendingOwners = function (metadata) { + return Boolean(metadata && Array.isArray(metadata.pending_owners)); +}; + +// INSUFFICIENT_PERMISSIONS +Core.isOwner = function (metadata, unsafeKey) { + return metadata.owners.indexOf(unsafeKey) !== -1; +}; + +Core.isPendingOwner = function (metadata, unsafeKey) { + return metadata.pending_owners.indexOf(unsafeKey) !== -1; +}; + +Core.haveACookie = function (Env, safeKey, cb) { + cb(); +}; + diff --git a/lib/commands/metadata.js b/lib/commands/metadata.js new file mode 100644 index 000000000..802942fcb --- /dev/null +++ b/lib/commands/metadata.js @@ -0,0 +1,197 @@ +/*jshint esversion: 6 */ +const Data = module.exports; + +const Meta = require("../metadata"); +const WriteQueue = require("../write-queue"); +const Core = require("./core"); +const Util = require("../common-util"); +const HK = require("../hk-util"); + +Data.getMetadataRaw = function (Env, channel /* channelName */, _cb) { + const cb = Util.once(Util.mkAsync(_cb)); + if (!Core.isValidId(channel)) { return void cb('INVALID_CHAN'); } + if (channel.length !== HK.STANDARD_CHANNEL_LENGTH) { return cb("INVALID_CHAN_LENGTH"); } + + var cached = Env.metadata_cache[channel]; + if (HK.isMetadataMessage(cached)) { + return void cb(void 0, cached); + } + + Env.batchMetadata(channel, cb, function (done) { + var ref = {}; + var lineHandler = Meta.createLineHandler(ref, Env.Log.error); + return void Env.msgStore.readChannelMetadata(channel, lineHandler, function (err) { + if (err) { + // stream errors? 
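+                // (these come from the storage layer while streaming the
+                // metadata log; `done` resolves every caller queued up by
+                // Env.batchMetadata, so concurrent requests for the same
+                // channel share a single read from disk)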
+                return void done(err);
+            }
+            done(void 0, ref.meta);
+        });
+    });
+};
+
+Data.getMetadata = function (Env, channel, cb, Server, netfluxId) {
+    Data.getMetadataRaw(Env, channel, function (err, metadata) {
+        if (err) { return void cb(err); }
+
+        if (!(metadata && metadata.restricted)) {
+            // if it's not restricted then just call back
+            return void cb(void 0, metadata);
+        }
+
+        const session = HK.getNetfluxSession(Env, netfluxId);
+        const allowed = HK.listAllowedUsers(metadata);
+
+        if (!HK.isUserSessionAllowed(allowed, session)) {
+            return void cb(void 0, {
+                restricted: metadata.restricted,
+                allowed: allowed,
+                rejected: true,
+            });
+        }
+        cb(void 0, metadata);
+    });
+};
+
+/* setMetadata
+    - write a new line to the metadata log if a valid command is provided
+    - data is an object: {
+        channel: channelId,
+        command: metadataCommand (string),
+        value: value
+    }
+*/
+var queueMetadata = WriteQueue();
+Data.setMetadata = function (Env, safeKey, data, cb, Server) {
+    var unsafeKey = Util.unescapeKeyCharacters(safeKey);
+
+    var channel = data.channel;
+    var command = data.command;
+    if (!channel || !Core.isValidId(channel)) { return void cb('INVALID_CHAN'); }
+    if (!command || typeof (command) !== 'string') { return void cb('INVALID_COMMAND'); }
+    if (Meta.commands.indexOf(command) === -1) { return void cb('UNSUPPORTED_COMMAND'); }
+
+    queueMetadata(channel, function (next) {
+        Data.getMetadataRaw(Env, channel, function (err, metadata) {
+            if (err) {
+                cb(err);
+                return void next();
+            }
+            if (!Core.hasOwners(metadata)) {
+                cb('E_NO_OWNERS');
+                return void next();
+            }
+
+            // if you are a pending owner and not an owner
+            //   you can either ADD_OWNERS, or RM_PENDING_OWNERS
+            //   and you should only be able to add yourself as an owner
+            //   everything else should be rejected
+            // else if you are not an owner
+            //   you should be rejected
+            // else write the command
+
+            // Confirm that the channel is owned by the user in question
+            // or the user is accepting a pending ownership offer
+            if (Core.hasPendingOwners(metadata) &&
+                Core.isPendingOwner(metadata, unsafeKey) &&
+                !Core.isOwner(metadata, unsafeKey)) {
+
+                // If you are a pending owner, make sure you can only add yourself as an owner
+                if ((command !== 'ADD_OWNERS' && command !== 'RM_PENDING_OWNERS')
+                    || !Array.isArray(data.value)
+                    || data.value.length !== 1
+                    || data.value[0] !== unsafeKey) {
+                    cb('INSUFFICIENT_PERMISSIONS');
+                    return void next();
+                }
+                // FIXME wacky fallthrough is hard to read
+                // we could pass this off to a writeMetadataCommand function
+                // and make the flow easier to follow
+            } else if (!Core.isOwner(metadata, unsafeKey)) {
+                cb('INSUFFICIENT_PERMISSIONS');
+                return void next();
+            }
+
+            // Add the new metadata line
+            var line = [command, data.value, +new Date()];
+            var changed = false;
+            try {
+                changed = Meta.handleCommand(metadata, line);
+            } catch (e) {
+                cb(e);
+                return void next();
+            }
+
+            // if your command is valid but it didn't result in any change to the metadata,
+            // call back now and don't write any "useless" line to the log
+            if (!changed) {
+                cb(void 0, metadata);
+                return void next();
+            }
+            Env.msgStore.writeMetadata(channel, JSON.stringify(line), function (e) {
+                if (e) {
+                    cb(e);
+                    return void next();
+                }
+
+                // send the message back to the person who changed it
+                // since we know they're allowed to see it
+                cb(void 0, metadata);
+                next();
+
+                const metadata_cache = Env.metadata_cache;
+
+                // update the cached metadata
+                metadata_cache[channel] = metadata;
+
+                // it's easy to check if the channel is restricted
+                const isRestricted = metadata.restricted;
+                // and these values will be used in any case
+                const s_metadata = JSON.stringify(metadata);
+                const hk_id = Env.historyKeeper.id;
+
+                if (!isRestricted) {
+                    // pre-allow-list behaviour
+                    // if it's not restricted, broadcast the new metadata to everyone
+                    return void Server.channelBroadcast(channel, s_metadata, hk_id);
+                }
+
+                // otherwise derive the list of users (unsafeKeys) that are allowed to stay
+                const allowed = HK.listAllowedUsers(metadata);
+                // anyone who is not allowed will get the same error message
+                const s_error = JSON.stringify({
+                    error: 'ERESTRICTED',
+                    channel: channel,
+                });
+
+                // iterate over the channel's userlist
+                const toRemove = [];
+                Server.getChannelUserList(channel).forEach(function (userId) {
+                    const session = HK.getNetfluxSession(Env, userId);
+
+                    // if the user is allowed to remain, send them the metadata
+                    if (HK.isUserSessionAllowed(allowed, session)) {
+                        return void Server.send(userId, [
+                            0,
+                            hk_id,
+                            'MSG',
+                            userId,
+                            s_metadata
+                        ], function () {});
+                    }
+                    // otherwise they are not in the list.
+                    // remember them so they can be kicked out of the channel,
+                    // then send them an error!
+                    toRemove.push(userId);
+                    Server.send(userId, [
+                        0,
+                        hk_id,
+                        'MSG',
+                        userId,
+                        s_error
+                    ], function () {});
+                });
+
+                Server.removeFromChannel(channel, toRemove);
+            });
+        });
+    });
+};
diff --git a/lib/commands/pin-rpc.js b/lib/commands/pin-rpc.js
new file mode 100644
index 000000000..4aaeb7aaf
--- /dev/null
+++ b/lib/commands/pin-rpc.js
@@ -0,0 +1,568 @@
+/*jshint esversion: 6 */
+const Core = require("./core");
+
+const BatchRead = require("../batch-read");
+const Pins = require("../pins");
+
+const Pinning = module.exports;
+const Nacl = require("tweetnacl/nacl-fast");
+const Util = require("../common-util");
+const nThen = require("nthen");
+const Saferphore = require("saferphore");
+
+//const escapeKeyCharacters = Util.escapeKeyCharacters;
+const unescapeKeyCharacters = Util.unescapeKeyCharacters;
+
+var sumChannelSizes = function (sizes) {
+    return Object.keys(sizes).map(function (id) { return sizes[id]; })
+        .filter(function (x) {
+            // only allow positive numbers
+            return !(typeof(x) !== 'number' || x <= 0);
+        })
+        .reduce(function (a, b) { return a + b; }, 0);
+};
+
+// FIXME it's possible for this to respond before the server has had a chance
+// to fetch the limits. Maybe we should respond with an error...
+// or wait until we actually know the limits before responding
+var getLimit = Pinning.getLimit = function (Env, safeKey, cb) {
+    var unsafeKey = unescapeKeyCharacters(safeKey);
+    var limit = Env.limits[unsafeKey];
+    var defaultLimit = typeof(Env.defaultStorageLimit) === 'number'?
+        Env.defaultStorageLimit: Core.DEFAULT_LIMIT;
+
+    var toSend = limit && typeof(limit.limit) === "number"?
+        [limit.limit, limit.plan, limit.note] : [defaultLimit, '', ''];
+
+    cb(void 0, toSend);
+};
+
+const answerDeferred = function (Env, channel, bool) {
+    const pending = Env.pendingPinInquiries;
+    const stack = pending[channel];
+    if (!Array.isArray(stack)) { return; }
+
+    delete pending[channel];
+
+    stack.forEach(function (cb) {
+        cb(void 0, bool);
+    });
+};
+
+var addPinned = function (
+    Env,
+    safeKey /*:string*/,
+    channelList /*Array*/,
+    cb /*:()=>void*/)
+{
+    channelList.forEach(function (channel) {
+        Pins.addUserPinToState(Env.pinnedPads, safeKey, channel);
+        answerDeferred(Env, channel, true);
+    });
+    cb();
+};
+
+const isEmpty = function (obj) {
+    if (!obj || typeof(obj) !== 'object') { return true; }
+    for (var key in obj) {
+        // the object has at least one of its own properties, so it is not empty
+        if (obj.hasOwnProperty(key)) { return false; }
+    }
+    return true;
+};
+
+const deferUserTask = function (Env, safeKey, deferred) {
+    const pending = Env.pendingUnpins;
+    (pending[safeKey] = pending[safeKey] || []).push(deferred);
+};
+
+const runUserDeferred = function (Env, safeKey) {
+    const pending = Env.pendingUnpins;
+    const stack = pending[safeKey];
+    if (!Array.isArray(stack)) { return; }
+    delete pending[safeKey];
+
+    stack.forEach(function (cb) {
+        cb();
+    });
+};
+
+const runRemainingDeferred = function (Env) {
+    const pending = Env.pendingUnpins;
+    for (var safeKey in pending) {
+        runUserDeferred(Env, safeKey);
+    }
+};
+
+const removeSelfFromPinned = function (Env, safeKey, channelList) {
+    channelList.forEach(function (channel) {
+        const channelPinStatus = Env.pinnedPads[channel];
+        if (!channelPinStatus) { return; }
+        delete channelPinStatus[safeKey];
+        if (isEmpty(channelPinStatus)) {
+            delete Env.pinnedPads[channel];
+        }
+    });
+};
+
+var removePinned = function (
+    Env,
+    safeKey /*:string*/,
+    channelList /*Array*/,
+    cb /*:()=>void*/)
+{
+
+    // if pins are already loaded then you can just unpin normally
+    if (Env.pinsLoaded) {
+        removeSelfFromPinned(Env, safeKey, channelList);
+        return void cb();
+    }
+
+    // otherwise defer until later...
+    deferUserTask(Env, safeKey, function () {
+        removeSelfFromPinned(Env, safeKey, channelList);
+        cb();
+    });
+};
+
+var getMultipleFileSize = function (Env, channels, cb) {
+    if (!Array.isArray(channels)) { return cb('INVALID_PIN_LIST'); }
+    if (typeof(Env.msgStore.getChannelSize) !== 'function') {
+        return cb('GET_CHANNEL_SIZE_UNSUPPORTED');
+    }
+
+    var i = channels.length;
+    var counts = {};
+
+    var done = function () {
+        i--;
+        if (i === 0) { return cb(void 0, counts); }
+    };
+
+    channels.forEach(function (channel) {
+        Pinning.getFileSize(Env, channel, function (e, size) {
+            if (e) {
+                // most likely error here is that a file no longer exists
+                // but a user still has it in their drive, and wants to know
+                // its size. We should find a way to inform them of this in
+                // the future. For now we can just tell them it has no size.
+
+                //WARN('getFileSize', e);
+                counts[channel] = 0;
+                return done();
+            }
+            counts[channel] = size;
+            done();
+        });
+    });
+};
+
+const batchUserPins = BatchRead("LOAD_USER_PINS");
+var loadUserPins = function (Env, safeKey, cb) {
+    var session = Core.getSession(Env.Sessions, safeKey);
+
+    if (session.channels) {
+        return cb(session.channels);
+    }
+
+    batchUserPins(safeKey, cb, function (done) {
+        var ref = {};
+        var lineHandler = Pins.createLineHandler(ref, function (label, data) {
+            Env.Log.error(label, {
+                log: safeKey,
+                data: data,
+            });
+        });
+
+        // if channels aren't in memory.
load them from disk + // TODO replace with readMessagesBin + Env.pinStore.getMessages(safeKey, lineHandler, function () { + // no more messages + + // only put this into the cache if it completes + session.channels = ref.pins; + done(ref.pins); // FIXME no error handling? + }); + }); +}; + +var truthyKeys = function (O) { + return Object.keys(O).filter(function (k) { + return O[k]; + }); +}; + +var getChannelList = Pinning.getChannelList = function (Env, safeKey, _cb) { + var cb = Util.once(Util.mkAsync(_cb)); + loadUserPins(Env, safeKey, function (pins) { + cb(truthyKeys(pins)); + }); +}; + +const batchTotalSize = BatchRead("GET_TOTAL_SIZE"); +Pinning.getTotalSize = function (Env, safeKey, cb) { + var unsafeKey = unescapeKeyCharacters(safeKey); + var limit = Env.limits[unsafeKey]; + + // Get a common key if multiple users share the same quota, otherwise take the public key + var batchKey = (limit && Array.isArray(limit.users)) ? limit.users.join('') : safeKey; + + batchTotalSize(batchKey, cb, function (done) { + var channels = []; + var bytes = 0; + nThen(function (waitFor) { + // Get the channels list for our user account + getChannelList(Env, safeKey, waitFor(function (_channels) { + if (!_channels) { + waitFor.abort(); + return done('INVALID_PIN_LIST'); + } + Array.prototype.push.apply(channels, _channels); + })); + // Get the channels list for users sharing our quota + if (limit && Array.isArray(limit.users) && limit.users.length > 1) { + limit.users.forEach(function (key) { + if (key === unsafeKey) { return; } // Don't count ourselves twice + getChannelList(Env, key, waitFor(function (_channels) { + if (!_channels) { return; } // Broken user, don't count their quota + Array.prototype.push.apply(channels, _channels); + })); + }); + } + }).nThen(function (waitFor) { + // Get size of the channels + var list = []; // Contains the channels already counted in the quota to avoid duplicates + channels.forEach(function (channel) { // TODO semaphore? + if (list.indexOf(channel) !== -1) { return; } + list.push(channel); + Pinning.getFileSize(Env, channel, waitFor(function (e, size) { + if (!e) { bytes += size; } + })); + }); + }).nThen(function () { + done(void 0, bytes); + }); + }); +}; + +/* Users should be able to clear their own pin log with an authenticated RPC +*/ +Pinning.removePins = function (Env, safeKey, cb) { + if (typeof(Env.pinStore.removeChannel) !== 'function') { + return void cb("E_NOT_IMPLEMENTED"); + } + Env.pinStore.removeChannel(safeKey, function (err) { + Env.Log.info('DELETION_PIN_BY_OWNER_RPC', { + safeKey: safeKey, + status: err? 
String(err): 'SUCCESS',
+        });
+
+        if (err) { return void cb(err); }
+        cb(void 0, 'OK');
+    });
+};
+
+Pinning.trimPins = function (Env, safeKey, cb) {
+    cb("NOT_IMPLEMENTED");
+};
+
+var getFreeSpace = Pinning.getFreeSpace = function (Env, safeKey, cb) {
+    getLimit(Env, safeKey, function (e, limit) {
+        if (e) { return void cb(e); }
+        Pinning.getTotalSize(Env, safeKey, function (e, size) {
+            if (typeof(size) === 'undefined') { return void cb(e); }
+
+            var rem = limit[0] - size;
+            if (typeof(rem) !== 'number') {
+                return void cb('invalid_response');
+            }
+            cb(void 0, rem);
+        });
+    });
+};
+
+var hashChannelList = function (A) {
+    var uniques = [];
+
+    A.forEach(function (a) {
+        if (uniques.indexOf(a) === -1) { uniques.push(a); }
+    });
+    uniques.sort();
+
+    var hash = Nacl.util.encodeBase64(Nacl.hash(Nacl
+        .util.decodeUTF8(JSON.stringify(uniques))));
+
+    return hash;
+};
+
+var getHash = Pinning.getHash = function (Env, safeKey, cb) {
+    getChannelList(Env, safeKey, function (channels) {
+        cb(void 0, hashChannelList(channels));
+    });
+};
+
+Pinning.pinChannel = function (Env, safeKey, channels, cb) {
+    // expected array
+    if (!Array.isArray(channels)) {
+        return void cb('INVALID_PIN_LIST');
+    }
+
+    // get channel list ensures your session has a cached channel list
+    getChannelList(Env, safeKey, function (pinned) {
+        var session = Core.getSession(Env.Sessions, safeKey);
+
+        // only pin channels which are not already pinned
+        var toStore = channels.filter(function (channel) {
+            return pinned.indexOf(channel) === -1;
+        });
+
+        if (toStore.length === 0) {
+            return void getHash(Env, safeKey, cb);
+        }
+
+        getMultipleFileSize(Env, toStore, function (e, sizes) {
+            if (typeof(sizes) === 'undefined') { return void cb(e); }
+            var pinSize = sumChannelSizes(sizes);
+
+            getFreeSpace(Env, safeKey, function (e, free) {
+                if (typeof(free) === 'undefined') {
+                    Env.WARN('getFreeSpace', e);
+                    return void cb(e);
+                }
+                if (pinSize > free) { return void cb('E_OVER_LIMIT'); }
+
+                Env.pinStore.message(safeKey, JSON.stringify(['PIN', toStore, +new Date()]),
+                    function (e) {
+                    if (e) { return void cb(e); }
+                    toStore.forEach(function (channel) {
+                        session.channels[channel] = true;
+                    });
+                    addPinned(Env, safeKey, toStore, () => {});
+                    getHash(Env, safeKey, cb);
+                });
+            });
+        });
+    });
+};
+
+Pinning.unpinChannel = function (Env, safeKey, channels, cb) {
+    // expected array
+    if (!Array.isArray(channels)) {
+        return void cb('INVALID_PIN_LIST');
+    }
+
+    getChannelList(Env, safeKey, function (pinned) {
+        var session = Core.getSession(Env.Sessions, safeKey);
+
+        // only unpin channels which are pinned
+        var toStore = channels.filter(function (channel) {
+            return pinned.indexOf(channel) !== -1;
+        });
+
+        if (toStore.length === 0) {
+            return void getHash(Env, safeKey, cb);
+        }
+
+        Env.pinStore.message(safeKey, JSON.stringify(['UNPIN', toStore, +new Date()]),
+            function (e) {
+            if (e) { return void cb(e); }
+            toStore.forEach(function (channel) {
+                delete session.channels[channel];
+            });
+            removePinned(Env, safeKey, toStore, () => {});
+            getHash(Env, safeKey, cb);
+        });
+    });
+};
+
+Pinning.resetUserPins = function (Env, safeKey, channelList, cb) {
+    if (!Array.isArray(channelList)) { return void cb('INVALID_PIN_LIST'); }
+    var session = Core.getSession(Env.Sessions, safeKey);
+
+    if (!channelList.length) {
+        return void getHash(Env, safeKey, function (e, hash) {
+            if (e) { return cb(e); }
+            cb(void 0, hash);
+        });
+    }
+
+    var pins = {};
+    getMultipleFileSize(Env, channelList, function (e, sizes) {
+        if (typeof(sizes) === 'undefined') {
return void cb(e); } + var pinSize = sumChannelSizes(sizes); + + + getLimit(Env, safeKey, function (e, limit) { + if (e) { + Env.WARN('[RESET_ERR]', e); + return void cb(e); + } + + /* we want to let people pin, even if they are over their limit, + but they should only be able to do this once. + + This prevents data loss in the case that someone registers, but + does not have enough free space to pin their migrated data. + + They will not be able to pin additional pads until they upgrade + or delete enough files to go back under their limit. */ + if (pinSize > limit[0] && session.hasPinned) { return void(cb('E_OVER_LIMIT')); } + Env.pinStore.message(safeKey, JSON.stringify(['RESET', channelList, +new Date()]), + function (e) { + if (e) { return void cb(e); } + channelList.forEach(function (channel) { + pins[channel] = true; + }); + + var oldChannels; + if (session.channels && typeof(session.channels) === 'object') { + oldChannels = Object.keys(session.channels); + } else { + oldChannels = []; + } + removePinned(Env, safeKey, oldChannels, () => { + addPinned(Env, safeKey, channelList, ()=>{}); + }); + + // update in-memory cache IFF the reset was allowed. + session.channels = pins; + getHash(Env, safeKey, function (e, hash) { + cb(e, hash); + }); + }); + }); + }); +}; + +Pinning.getFileSize = function (Env, channel, _cb) { + var cb = Util.once(Util.mkAsync(_cb)); + if (!Core.isValidId(channel)) { return void cb('INVALID_CHAN'); } + if (channel.length === 32) { + if (typeof(Env.msgStore.getChannelSize) !== 'function') { + return cb('GET_CHANNEL_SIZE_UNSUPPORTED'); + } + + return void Env.msgStore.getChannelSize(channel, function (e, size /*:number*/) { + if (e) { + if (e.code === 'ENOENT') { return void cb(void 0, 0); } + return void cb(e.code); + } + cb(void 0, size); + }); + } + + // 'channel' refers to a file, so you need another API + Env.blobStore.size(channel, function (e, size) { + if (typeof(size) === 'undefined') { return void cb(e); } + cb(void 0, size); + }); +}; + +/* accepts a list, and returns a sublist of channel or file ids which seem + to have been deleted from the server (file size 0) + + we might consider that we should only say a file is gone if fs.stat returns + ENOENT, but for now it's simplest to just rely on getFileSize... +*/ +Pinning.getDeletedPads = function (Env, channels, cb) { + if (!Array.isArray(channels)) { return cb('INVALID_LIST'); } + var L = channels.length; + + var sem = Saferphore.create(10); + var absentees = []; + + var job = function (channel, wait) { + return function (give) { + Pinning.getFileSize(Env, channel, wait(give(function (e, size) { + if (e) { return; } + if (size === 0) { absentees.push(channel); } + }))); + }; + }; + + nThen(function (w) { + for (var i = 0; i < L; i++) { + sem.take(job(channels[i], w)); + } + }).nThen(function () { + cb(void 0, absentees); + }); +}; + +const answerNoConclusively = function (Env) { + const pending = Env.pendingPinInquiries; + for (var channel in pending) { + answerDeferred(Env, channel, false); + } +}; + +// inform that the +Pinning.loadChannelPins = function (Env) { + const stats = { + surplus: 0, + pinned: 0, + duplicated: 0, + // in theory we could use this number for the admin panel + // but we'd have to keep updating it whenever a new pin log + // was created or deleted. 
In practice it's probably not worth the trouble + users: 0, + }; + + const handler = function (ref, safeKey, pinned) { + if (ref.surplus) { + stats.surplus += ref.surplus; + } + for (var channel in ref.pins) { + if (!pinned.hasOwnProperty(channel)) { + answerDeferred(Env, channel, true); + stats.pinned++; + } else { + stats.duplicated++; + } + } + stats.users++; + runUserDeferred(Env, safeKey); + }; + + Pins.list(function (err) { + if (err) { + Env.pinsLoaded = true; + Env.Log.error("LOAD_CHANNEL_PINS", err); + return; + } + + Env.pinsLoaded = true; + answerNoConclusively(Env); + runRemainingDeferred(Env); + }, { + pinPath: Env.paths.pin, + handler: handler, + pinned: Env.pinnedPads, + workers: Env.pinWorkers, + }); +}; + +/* +const deferResponse = function (Env, channel, cb) { + const pending = Env.pendingPinInquiries; + (pending[channel] = pending[channel] || []).push(cb); +}; +*/ + +Pinning.isChannelPinned = function (Env, channel, cb) { + return void cb(void 0, true); // XXX +/* + // if the pins are fully loaded then you can answer yes/no definitively + if (Env.pinsLoaded) { + return void cb(void 0, !isEmpty(Env.pinnedPads[channel])); + } + + // you may already know that a channel is pinned + // even if you're still loading. answer immediately if so + if (!isEmpty(Env.pinnedPads[channel])) { return cb(void 0, true); } + + // if you're still loading them then can answer 'yes' as soon + // as you learn that one account has pinned a file. + // negative responses have to wait until the end + deferResponse(Env, channel, cb); +*/ +}; + diff --git a/lib/commands/quota.js b/lib/commands/quota.js new file mode 100644 index 000000000..9e1c631d9 --- /dev/null +++ b/lib/commands/quota.js @@ -0,0 +1,104 @@ +/*jshint esversion: 6 */ +/* globals Buffer*/ +const Quota = module.exports; + +const Util = require("../common-util"); +const Package = require('../../package.json'); +const Https = require("https"); + +Quota.applyCustomLimits = function (Env) { + var isLimit = function (o) { + var valid = o && typeof(o) === 'object' && + typeof(o.limit) === 'number' && + typeof(o.plan) === 'string' && + typeof(o.note) === 'string'; + return valid; + }; + + // read custom limits from the Environment (taken from config) + var customLimits = (function (custom) { + var limits = {}; + Object.keys(custom).forEach(function (k) { + k.replace(/\/([^\/]+)$/, function (all, safeKey) { + var id = Util.unescapeKeyCharacters(safeKey || ''); + limits[id] = custom[k]; + return ''; + }); + }); + return limits; + }(Env.customLimits || {})); + + Object.keys(customLimits).forEach(function (k) { + if (!isLimit(customLimits[k])) { return; } + Env.limits[k] = customLimits[k]; + }); +}; + +Quota.updateCachedLimits = function (Env, cb) { + Quota.applyCustomLimits(Env); + if (Env.allowSubscriptions === false || Env.blockDailyCheck === true) { return void cb(); } + + var body = JSON.stringify({ + domain: Env.myDomain, + subdomain: Env.mySubdomain || null, + adminEmail: Env.adminEmail, + version: Package.version + }); + var options = { + host: 'accounts.cryptpad.fr', + path: '/api/getauthorized', + method: 'POST', + headers: { + "Content-Type": "application/json", + "Content-Length": Buffer.byteLength(body) + } + }; + + var req = Https.request(options, function (response) { + if (!('' + response.statusCode).match(/^2\d\d$/)) { + return void cb('SERVER ERROR ' + response.statusCode); + } + var str = ''; + + response.on('data', function (chunk) { + str += chunk; + }); + + response.on('end', function () { + try { + var json = 
JSON.parse(str);
+                Env.limits = json;
+                Quota.applyCustomLimits(Env);
+                cb(void 0);
+            } catch (e) {
+                cb(e);
+            }
+        });
+    });
+
+    req.on('error', function (e) {
+        Quota.applyCustomLimits(Env);
+        if (!Env.myDomain) { return cb(); }
+        // only return an error if your server allows subscriptions
+        cb(e);
+    });
+
+    req.end(body);
+};
+
+// The limits object contains storage limits for all the public keys that have paid
+// Each key maps to an object containing the 'limit' value and a 'note' explaining that limit
+Quota.getUpdatedLimit = function (Env, safeKey, cb) { // FIXME BATCH?S
+    Quota.updateCachedLimits(Env, function (err) {
+        if (err) { return void cb(err); }
+
+        var limit = Env.limits[safeKey];
+
+        if (limit && typeof(limit.limit) === 'number') {
+            return void cb(void 0, [limit.limit, limit.plan, limit.note]);
+        }
+
+        return void cb(void 0, [Env.defaultStorageLimit, '', '']);
+    });
+};
+
diff --git a/lib/commands/upload.js b/lib/commands/upload.js
new file mode 100644
index 000000000..c64368949
--- /dev/null
+++ b/lib/commands/upload.js
@@ -0,0 +1,89 @@
+/*jshint esversion: 6 */
+const Upload = module.exports;
+const Util = require("../common-util");
+const Pinning = require("./pin-rpc");
+const nThen = require("nthen");
+const Core = require("./core");
+
+Upload.status = function (Env, safeKey, filesize, _cb) { // FIXME FILES
+    var cb = Util.once(Util.mkAsync(_cb));
+
+    // validate that the provided size is actually a non-negative number
+    if (typeof(filesize) !== 'number' ||
+        filesize < 0) { return void cb('E_INVALID_SIZE'); }
+
+    nThen(function (w) {
+        // if the proposed upload size is within the regular limit
+        // jump ahead to the next block
+        if (filesize <= Env.maxUploadSize) { return; }
+
+        // if larger uploads aren't explicitly enabled then reject them
+        if (typeof(Env.premiumUploadSize) !== 'number') {
+            w.abort();
+            return void cb('TOO_LARGE');
+        }
+
+        // otherwise go and retrieve info about the user's quota
+        Pinning.getLimit(Env, safeKey, w(function (err, limit) {
+            if (err) {
+                w.abort();
+                return void cb("E_BAD_LIMIT");
+            }
+
+            var plan = limit[1];
+
+            // see if they have a special plan, reject them if not
+            if (plan === '') {
+                w.abort();
+                return void cb('TOO_LARGE');
+            }
+
+            // and that they're not over the greater limit
+            if (filesize >= Env.premiumUploadSize) {
+                w.abort();
+                return void cb("TOO_LARGE");
+            }
+
+            // fallthrough will proceed to the next block
+        }));
+    }).nThen(function (w) {
+        var abortAndCB = Util.both(w.abort, cb);
+        Env.blobStore.status(safeKey, w(function (err, inProgress) {
+            // if there's an error something is weird
+            if (err) { return void abortAndCB(err); }
+
+            // we cannot upload two things at once
+            if (inProgress) { return void abortAndCB(void 0, true); }
+        }));
+    }).nThen(function () {
+        // if you're here then there are no pending uploads
+        // check if you have space in your quota to upload something of this size
+        Pinning.getFreeSpace(Env, safeKey, function (e, free) {
+            if (e) { return void cb(e); }
+            if (filesize >= free) { return cb('NOT_ENOUGH_SPACE'); }
+
+            var user = Core.getSession(Env.Sessions, safeKey);
+            user.pendingUploadSize = filesize;
+            user.currentUploadSize = 0;
+
+            cb(void 0, false);
+        });
+    });
+};
+
+Upload.upload = function (Env, safeKey, chunk, cb) {
+    Env.blobStore.upload(safeKey, chunk, cb);
+};
+
+Upload.complete = function (Env, safeKey, arg, cb) {
+    Env.blobStore.complete(safeKey, arg, cb);
+};
+
+Upload.cancel = function (Env, safeKey, arg, cb) {
+    Env.blobStore.cancel(safeKey, arg, cb);
+};
+
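+/* A rough sketch of how the RPC layer is expected to drive the commands
+   above (illustrative only; `Env`, `safeKey`, `chunk` and `arg` stand in
+   for values supplied by the caller):
+
+    Upload.status(Env, safeKey, filesize, function (err, inProgress) {
+        // rejects oversized or concurrent uploads, checks quota otherwise
+        if (err || inProgress) { return; }
+        Upload.upload(Env, safeKey, chunk, function (err) {
+            // ...called once per chunk, then finalized with:
+            Upload.complete(Env, safeKey, arg, function (err, id) {});
+            // (or Upload.cancel to abort, or Upload.complete_owned below
+            // for files the uploader owns)
+        });
+    });
+*/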
+Upload.complete_owned = function (Env, safeKey, arg, cb) { + Env.blobStore.completeOwned(safeKey, arg, cb); +}; + diff --git a/lib/deduplicate.js b/lib/deduplicate.js deleted file mode 100644 index 3ad62e6b0..000000000 --- a/lib/deduplicate.js +++ /dev/null @@ -1,11 +0,0 @@ -// remove duplicate elements in an array -module.exports = function (O) { - // make a copy of the original array - var A = O.slice(); - for (var i = 0; i < A.length; i++) { - for (var j = i + 1; j < A.length; j++) { - if (A[i] === A[j]) { A.splice(j--, 1); } - } - } - return A; -}; diff --git a/lib/defaults.js b/lib/defaults.js new file mode 100644 index 000000000..7119a0c6a --- /dev/null +++ b/lib/defaults.js @@ -0,0 +1,86 @@ +var Default = module.exports; + +Default.commonCSP = function (domain) { + domain = ' ' + domain; + // Content-Security-Policy + + return [ + "default-src 'none'", + "style-src 'unsafe-inline' 'self' " + domain, + "font-src 'self' data:" + domain, + + /* child-src is used to restrict iframes to a set of allowed domains. + * connect-src is used to restrict what domains can connect to the websocket. + * + * it is recommended that you configure these fields to match the + * domain which will serve your CryptPad instance. + */ + "child-src blob: *", + // IE/Edge + "frame-src blob: *", + + /* this allows connections over secure or insecure websockets + if you are deploying to production, you'll probably want to remove + the ws://* directive, and change '*' to your domain + */ + "connect-src 'self' ws: wss: blob:" + domain, + + // data: is used by codemirror + "img-src 'self' data: blob:" + domain, + "media-src * blob:", + + // for accounts.cryptpad.fr authentication and cross-domain iframe sandbox + "frame-ancestors *", + "" + ]; +}; + +Default.contentSecurity = function (domain) { + return (Default.commonCSP(domain).join('; ') + "script-src 'self' resource: " + domain).replace(/\s+/g, ' '); +}; + +Default.padContentSecurity = function (domain) { + return (Default.commonCSP(domain).join('; ') + "script-src 'self' 'unsafe-eval' 'unsafe-inline' resource: " + domain).replace(/\s+/g, ' '); +}; + +Default.httpHeaders = function () { + return { + "X-XSS-Protection": "1; mode=block", + "X-Content-Type-Options": "nosniff", + "Access-Control-Allow-Origin": "*" + }; +}; + +Default.mainPages = function () { + return [ + 'index', + 'privacy', + 'terms', + 'about', + 'contact', + 'what-is-cryptpad', + 'features', + 'faq', + 'maintenance' + ]; +}; + +/* By default the CryptPad server will run scheduled tasks every five minutes + * If you want to run scheduled tasks in a separate process (like a crontab) + * you can disable this behaviour by setting the following value to true + */ + //disableIntegratedTasks: false, + + /* CryptPad's file storage adaptor closes unused files after a configurable + * number of milliseconds (default 30000 (30 seconds)) + */ +// channelExpirationMs: 30000, + + /* CryptPad's file storage adaptor is limited by the number of open files. 
+ * When the adaptor reaches openFileLimit, it will clean up older files + */ + //openFileLimit: 2048, + + + + diff --git a/lib/historyKeeper.js b/lib/historyKeeper.js new file mode 100644 index 000000000..fcd291414 --- /dev/null +++ b/lib/historyKeeper.js @@ -0,0 +1,267 @@ +/* jshint esversion: 6 */ + +const nThen = require('nthen'); +const Crypto = require('crypto'); +const WriteQueue = require("./write-queue"); +const BatchRead = require("./batch-read"); +const RPC = require("./rpc"); +const HK = require("./hk-util.js"); +const Core = require("./commands/core"); + +const Store = require("./storage/file"); +const BlobStore = require("./storage/blob"); + +module.exports.create = function (config, cb) { + const Log = config.log; + var WARN = function (e, output) { + if (e && output) { + Log.warn(e, { + output: output, + message: String(e), + stack: new Error(e).stack, + }); + } + }; + + Log.silly('HK_LOADING', 'LOADING HISTORY_KEEPER MODULE'); + + // TODO populate Env with everything that you use from config + // so that you can stop passing around your raw config + // and more easily share state between historyKeeper and rpc + const Env = { + Log: Log, + // tasks + // store + id: Crypto.randomBytes(8).toString('hex'), + + metadata_cache: {}, + channel_cache: {}, + queueStorage: WriteQueue(), + + batchIndexReads: BatchRead("HK_GET_INDEX"), + batchMetadata: BatchRead('GET_METADATA'), + batchRegisteredUsers: BatchRead("GET_REGISTERED_USERS"), + batchDiskUsage: BatchRead('GET_DISK_USAGE'), + + //historyKeeper: config.historyKeeper, + intervals: config.intervals || {}, + maxUploadSize: config.maxUploadSize || (20 * 1024 * 1024), + premiumUploadSize: false, // overridden below... + Sessions: {}, + paths: {}, + //msgStore: config.store, + + netfluxUsers: {}, + + pinStore: undefined, + pinnedPads: {}, + pinsLoaded: false, + pendingPinInquiries: {}, + pendingUnpins: {}, + pinWorkers: 5, + + limits: {}, + admins: [], + WARN: WARN, + flushCache: config.flushCache, + adminEmail: config.adminEmail, + allowSubscriptions: config.allowSubscriptions === true, + blockDailyCheck: config.blockDailyCheck === true, + + myDomain: config.myDomain, + mySubdomain: config.mySubdomain, // only exists for the accounts integration + customLimits: config.customLimits || {}, + // FIXME this attribute isn't in the default conf + // but it is referenced in Quota + domain: config.domain + }; + + (function () { + var pes = config.premiumUploadSize; + if (!isNaN(pes) && pes >= Env.maxUploadSize) { + Env.premiumUploadSize = pes; + } + }()); + + var paths = Env.paths; + + var keyOrDefaultString = function (key, def) { + return typeof(config[key]) === 'string'? config[key]: def; + }; + + var pinPath = paths.pin = keyOrDefaultString('pinPath', './pins'); + paths.block = keyOrDefaultString('blockPath', './block'); + paths.data = keyOrDefaultString('filePath', './datastore'); + paths.staging = keyOrDefaultString('blobStagingPath', './blobstage'); + paths.blob = keyOrDefaultString('blobPath', './blob'); + + Env.defaultStorageLimit = typeof(config.defaultStorageLimit) === 'number' && config.defaultStorageLimit > 0? + config.defaultStorageLimit: + Core.DEFAULT_LIMIT; + + try { + Env.admins = (config.adminKeys || []).map(function (k) { + k = k.replace(/\/+$/, ''); + var s = k.split('/'); + return s[s.length-1]; + }); + } catch (e) { + console.error("Can't parse admin keys. 
Please update or fix your config.js file!"); + } + + config.historyKeeper = Env.historyKeeper = { + metadata_cache: Env.metadata_cache, + channel_cache: Env.channel_cache, + + id: Env.id, + + channelMessage: function (Server, channel, msgStruct) { + // netflux-server emits 'channelMessage' events whenever someone broadcasts to a channel + // historyKeeper stores these messages if the channel id indicates that they are + // a channel type with permanent history + HK.onChannelMessage(Env, Server, channel, msgStruct); + }, + channelClose: function (channelName) { + // netflux-server emits 'channelClose' events whenever everyone leaves a channel + // we drop cached metadata and indexes at the same time + HK.dropChannel(Env, channelName); + }, + channelOpen: function (Server, channelName, userId, wait) { + Env.channel_cache[channelName] = Env.channel_cache[channelName] || {}; + + var sendHKJoinMessage = function () { + Server.send(userId, [ + 0, + Env.id, + 'JOIN', + channelName + ]); + }; + + // a little backwards compatibility in case you don't have the latest server + // allow lists won't work unless you update, though + if (typeof(wait) !== 'function') { return void sendHKJoinMessage(); } + + var next = wait(); + var cb = function (err, info) { + next(err, info, sendHKJoinMessage); + }; + + // only conventional channels can be restricted + if ((channelName || "").length !== HK.STANDARD_CHANNEL_LENGTH) { + return void cb(); + } + + // gets and caches the metadata... + HK.getMetadata(Env, channelName, function (err, metadata) { + if (err) { + Log.error('HK_METADATA_ERR', { + channel: channelName, + error: err, + }); + } + if (!metadata || (metadata && !metadata.restricted)) { + // the channel doesn't have metadata, or it does and it's not restricted + // either way, let them join. + return void cb(); + } + + // this channel is restricted. verify that the user in question is in the allow list + + // construct a definitive list (owners + allowed) + var allowed = HK.listAllowedUsers(metadata); + // and get the list of keys for which this user has already authenticated + var session = HK.getNetfluxSession(Env, userId); + + if (HK.isUserSessionAllowed(allowed, session)) { + return void cb(); + } + + // otherwise they're not allowed. + // respond with a special error that includes the list of keys + // which would be allowed... 
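+                // e.g. a rejected client receives the error "ERESTRICTED"
+                // along with `allowed` (owners plus the allow list) and can
+                // prompt the user to authenticate with one of those keys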
+                // FIXME RESTRICT bonus points if you hash the keys to limit data exposure
+                cb("ERESTRICTED", allowed);
+            });
+        },
+        sessionClose: function (userId, reason) {
+            HK.closeNetfluxSession(Env, userId);
+            if (['BAD_MESSAGE', 'SOCKET_ERROR', 'SEND_MESSAGE_FAIL_2'].indexOf(reason) !== -1) {
+                if (reason && reason.code === 'ECONNRESET') { return; }
+                return void Log.error('SESSION_CLOSE_WITH_ERROR', {
+                    userId: userId,
+                    reason: reason,
+                });
+            }
+
+            if (['SOCKET_CLOSED', 'SOCKET_ERROR'].indexOf(reason) !== -1) { return; }
+            Log.verbose('SESSION_CLOSE_ROUTINE', {
+                userId: userId,
+                reason: reason,
+            });
+        },
+        directMessage: function (Server, seq, userId, json) {
+            // netflux-server allows you to register an id with a handler
+            // this handler is invoked every time someone sends a message to that id
+            HK.onDirectMessage(Env, Server, seq, userId, json);
+        },
+    };
+
+    Log.verbose('HK_ID', 'History keeper ID: ' + Env.id);
+
+    nThen(function (w) {
+        // create a pin store
+        Store.create({
+            filePath: pinPath,
+        }, w(function (s) {
+            Env.pinStore = s;
+        }));
+
+        // create a channel store
+        Store.create(config, w(function (_store) {
+            config.store = _store;
+            Env.msgStore = _store; // API used by rpc
+            Env.store = _store; // API used by historyKeeper
+        }));
+
+        // create a blob store
+        BlobStore.create({
+            blobPath: config.blobPath,
+            blobStagingPath: config.blobStagingPath,
+            archivePath: config.archivePath,
+            getSession: function (safeKey) {
+                return Core.getSession(Env.Sessions, safeKey);
+            },
+        }, w(function (err, blob) {
+            if (err) { throw new Error(err); }
+            Env.blobStore = blob;
+        }));
+    }).nThen(function (w) {
+        // create a task store
+        require("./storage/tasks").create(config, w(function (e, tasks) {
+            if (e) {
+                throw e;
+            }
+            Env.tasks = tasks;
+            config.tasks = tasks;
+            if (config.disableIntegratedTasks) { return; }
+
+            config.intervals = config.intervals || {};
+            config.intervals.taskExpiration = setInterval(function () {
+                tasks.runAll(function (err) {
+                    if (err) {
+                        // either TASK_CONCURRENCY or an error with tasks.list
+                        // in either case it is already logged.
+                    }
+                });
+            }, 1000 * 60 * 5); // run every five minutes
+        }));
+    }).nThen(function () {
+        RPC.create(Env, function (err, _rpc) {
+            if (err) { throw err; }
+
+            Env.rpc = _rpc;
+            cb(void 0, config.historyKeeper);
+        });
+    });
+};
diff --git a/lib/hk-util.js b/lib/hk-util.js
new file mode 100644
index 000000000..cb9e9b8ef
--- /dev/null
+++ b/lib/hk-util.js
@@ -0,0 +1,1009 @@
+/* jshint esversion: 6 */
+/* global Buffer */
+var HK = module.exports;
+
+const nThen = require('nthen');
+const Util = require("./common-util");
+const MetaRPC = require("./commands/metadata");
+const Nacl = require('tweetnacl/nacl-fast');
+
+const now = function () { return (new Date()).getTime(); };
+const ONE_DAY = 1000 * 60 * 60 * 24; // one day in milliseconds
+
+/* getHash
+    * this function slices off the leading portion of a message which is
+      most likely unique
+    * these "hashes" are used to identify particular messages in a channel's history
+    * clients store "hashes" either in memory or in their drive to query for new messages:
+        * when reconnecting to a pad
+        * when connecting to chat or a mailbox
+    * thus, we can't change this function without invalidating client data which:
+        * is encrypted clientside
+        * can't be easily migrated
+    * don't break it!
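+    * (illustrative) for a string message, getHash(msg) === msg.slice(0, 64)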
+*/ +const getHash = HK.getHash = function (msg, Log) { + if (typeof(msg) !== 'string') { + if (Log) { + Log.warn('HK_GET_HASH', 'getHash() called on ' + typeof(msg) + ': ' + msg); + } + return ''; + } + return msg.slice(0,64); +}; + +// historyKeeper should explicitly store any channel +// with a 32 character id +const STANDARD_CHANNEL_LENGTH = HK.STANDARD_CHANNEL_LENGTH = 32; + +// historyKeeper should not store messages sent to any channel +// with a 34 character id +const EPHEMERAL_CHANNEL_LENGTH = HK.EPHEMERAL_CHANNEL_LENGTH = 34; + +const tryParse = function (Env, str) { + try { + return JSON.parse(str); + } catch (err) { + Env.Log.error('HK_PARSE_ERROR', err); + } +}; + +/* sliceCpIndex + returns a list of all checkpoints which might be relevant for a client connecting to a session + + * if there are two or fewer checkpoints, return everything you have + * if there are more than two + * return at least two + * plus any more which were received within the last 100 messages + + This is important because the additional history is what prevents + clients from forking on checkpoints and dropping forked history. + +*/ +const sliceCpIndex = function (cpIndex, line) { + // Remove "old" checkpoints (cp sent before 100 messages ago) + const minLine = Math.max(0, (line - 100)); + let start = cpIndex.slice(0, -2); + const end = cpIndex.slice(-2); + start = start.filter(function (obj) { + return obj.line > minLine; + }); + return start.concat(end); +}; + +const isMetadataMessage = HK.isMetadataMessage = function (parsed) { + return Boolean(parsed && parsed.channel); +}; + +HK.listAllowedUsers = function (metadata) { + return (metadata.owners || []).concat((metadata.allowed || [])); +}; + +HK.getNetfluxSession = function (Env, netfluxId) { + return Env.netfluxUsers[netfluxId]; +}; + +HK.isUserSessionAllowed = function (allowed, session) { + if (!session) { return false; } + for (var unsafeKey in session) { + if (allowed.indexOf(unsafeKey) !== -1) { + return true; + } + } + return false; +}; + +HK.authenticateNetfluxSession = function (Env, netfluxId, unsafeKey) { + var user = Env.netfluxUsers[netfluxId] = Env.netfluxUsers[netfluxId] || {}; + user[unsafeKey] = +new Date(); +}; + +HK.closeNetfluxSession = function (Env, netfluxId) { + delete Env.netfluxUsers[netfluxId]; +}; + +// validateKeyStrings supplied by clients must decode to 32-byte Uint8Arrays +const isValidValidateKeyString = function (key) { + try { + return typeof(key) === 'string' && + Nacl.util.decodeBase64(key).length === Nacl.sign.publicKeyLength; + } catch (e) { + return false; + } +}; + +var CHECKPOINT_PATTERN = /^cp\|(([A-Za-z0-9+\/=]+)\|)?/; + +/* expireChannel is here to clean up channels that should have been removed + but for some reason are still present +*/ +const expireChannel = function (Env, channel) { + return void Env.store.archiveChannel(channel, function (err) { + Env.Log.info("ARCHIVAL_CHANNEL_BY_HISTORY_KEEPER_EXPIRATION", { + channelId: channel, + status: err? 
String(err): "SUCCESS", + }); + }); +}; + +/* dropChannel + * cleans up memory structures which are managed entirely by the historyKeeper +*/ +const dropChannel = HK.dropChannel = function (Env, chanName) { + delete Env.metadata_cache[chanName]; + delete Env.channel_cache[chanName]; +}; + +/* checkExpired + * synchronously returns true or undefined to indicate whether the channel is expired + * according to its metadata + * has some side effects: + * closes the channel via the store.closeChannel API + * and then broadcasts to all channel members that the channel has expired + * removes the channel from the netflux-server's in-memory cache + * removes the channel metadata from history keeper's in-memory cache + + FIXME the boolean nature of this API should be separated from its side effects +*/ +const checkExpired = function (Env, Server, channel) { + const store = Env.store; + const metadata_cache = Env.metadata_cache; + + if (!(channel && channel.length === STANDARD_CHANNEL_LENGTH)) { return false; } + let metadata = metadata_cache[channel]; + if (!(metadata && typeof(metadata.expire) === 'number')) { return false; } + + // the number of milliseconds ago the channel should have expired + let pastDue = (+new Date()) - metadata.expire; + + // less than zero means that it hasn't expired yet + if (pastDue < 0) { return false; } + + // if it should have expired more than a day ago... + // there may have been a problem with scheduling tasks + // or the scheduled tasks may not be running + // so trigger a removal from here + if (pastDue >= ONE_DAY) { expireChannel(Env, channel); } + + // close the channel + store.closeChannel(channel, function () { + Server.channelBroadcast(channel, { + error: 'EEXPIRED', + channel: channel + }, Env.id); + dropChannel(Env, channel); + }); + + // return true to indicate that it has expired + return true; +}; + +const getMetadata = HK.getMetadata = function (Env, channelName, _cb) { + var cb = Util.once(Util.mkAsync(_cb)); + + var metadata = Env.metadata_cache[channelName]; + if (metadata && typeof(metadata) === 'object') { + return void cb(undefined, metadata); + } + + MetaRPC.getMetadataRaw(Env, channelName, function (err, metadata) { + if (err) { + console.error(err); + return void cb(err); + } + if (!(metadata && typeof(metadata.channel) === 'string' && metadata.channel.length === STANDARD_CHANNEL_LENGTH)) { + return cb(); + } + + // cache it + Env.metadata_cache[channelName] = metadata; + cb(undefined, metadata); + }); +}; + +/* computeIndex + can call back with an error or a computed index which includes: + * cpIndex: + * array including any checkpoints pushed within the last 100 messages + * processed by 'sliceCpIndex(cpIndex, line)' + * offsetByHash: + * a map containing message offsets by their hash + * this is for every message in history, so it could be very large... + * except we remove offsets from the map if they occur before the oldest relevant checkpoint + * size: in bytes + * metadata: + * validationKey + * expiration time + * owners + * ??? 
(anything else we might add in the future) + * line + * the number of messages in history + * including the initial metadata line, if it exists + +*/ +const computeIndex = function (Env, channelName, cb) { + const store = Env.store; + const Log = Env.Log; + + const cpIndex = []; + let messageBuf = []; + let i = 0; + + const CB = Util.once(cb); + + const offsetByHash = {}; + let size = 0; + nThen(function (w) { + // iterate over all messages in the channel log + // old channels can contain metadata as the first message of the log + // skip over metadata as that is handled elsewhere + // otherwise index important messages in the log + store.readMessagesBin(channelName, 0, (msgObj, readMore) => { + let msg; + // keep an eye out for the metadata line if you haven't already seen it + // but only check for metadata on the first line + if (!i && msgObj.buff.indexOf('{') === 0) { + i++; // always increment the message counter + msg = tryParse(Env, msgObj.buff.toString('utf8')); + if (typeof msg === "undefined") { return readMore(); } + + // validate that the current line really is metadata before storing it as such + // skip this, as you already have metadata... + if (isMetadataMessage(msg)) { return readMore(); } + } + i++; + if (msgObj.buff.indexOf('cp|') > -1) { + msg = msg || tryParse(Env, msgObj.buff.toString('utf8')); + if (typeof msg === "undefined") { return readMore(); } + // cache the offsets of checkpoints if they can be parsed + if (msg[2] === 'MSG' && msg[4].indexOf('cp|') === 0) { + cpIndex.push({ + offset: msgObj.offset, + line: i + }); + // we only want to store messages since the latest checkpoint + // so clear the buffer every time you see a new one + messageBuf = []; + } + } + // if it's not metadata or a checkpoint then it should be a regular message + // store it in the buffer + messageBuf.push(msgObj); + return readMore(); + }, w((err) => { + if (err && err.code !== 'ENOENT') { + w.abort(); + return void CB(err); + } + + // once indexing is complete you should have a buffer of messages since the latest checkpoint + // map the 'hash' of each message to its byte offset in the log, to be used for reconnecting clients + messageBuf.forEach((msgObj) => { + const msg = tryParse(Env, msgObj.buff.toString('utf8')); + if (typeof msg === "undefined") { return; } + if (msg[0] === 0 && msg[2] === 'MSG' && typeof(msg[4]) === 'string') { + // msgObj.offset is API guaranteed by our storage module + // it should always be a valid positive integer + offsetByHash[getHash(msg[4], Log)] = msgObj.offset; + } + // There is a trailing \n at the end of the file + size = msgObj.offset + msgObj.buff.length + 1; + }); + })); + }).nThen(function () { + // return the computed index + CB(null, { + // Only keep the checkpoints included in the last 100 messages + cpIndex: sliceCpIndex(cpIndex, i), + offsetByHash: offsetByHash, + size: size, + //metadata: metadata, + line: i + }); + }); +}; + +/* getIndex + calls back with an error if anything goes wrong + or with a cached index for a channel if it exists + (along with metadata) + otherwise it calls back with the index computed by 'computeIndex' + + as an added bonus: + if the channel exists but its index does not then it caches the index +*/ +const getIndex = (Env, channelName, cb) => { + const channel_cache = Env.channel_cache; + + const chan = channel_cache[channelName]; + + // if there is a channel in memory and it has an index cached, return it + if (chan && chan.index) { + // enforce async behaviour + return void Util.mkAsync(cb)(undefined, chan.index); 
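+        // (Util.mkAsync defers the callback to a later tick, so getIndex
+        // behaves asynchronously whether or not the index was cached)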
+    }
+
+    Env.batchIndexReads(channelName, cb, function (done) {
+        computeIndex(Env, channelName, (err, ret) => {
+            // this is most likely an unrecoverable filesystem error
+            if (err) { return void done(err); }
+            // cache the computed result if possible
+            if (chan) { chan.index = ret; }
+            // return
+            done(void 0, ret);
+        });
+    });
+};
+
+/* storeMessage
+    * channel id
+    * the message to store
+    * whether the message is a checkpoint
+    * optionally the hash of the message
+        * it's not always used, but we guard against it
+
+    * async but doesn't have a callback
+    * source of a race condition whereby:
+        * two messages can be inserted
+        * two offsets can be computed using the total size of all the messages
+        * but the offsets don't correspond to the actual location of the newlines
+        * because the two actions were performed like ABba...
+    * the fix is to use callbacks and implement queueing for writes
+        * to guarantee that offset computation is always atomic with writes
+*/
+const storeMessage = function (Env, channel, msg, isCp, optionalMessageHash) {
+    const id = channel.id;
+    const Log = Env.Log;
+
+    Env.queueStorage(id, function (next) {
+        const msgBin = Buffer.from(msg + '\n', 'utf8');
+        // Store the message first, and update the index only once it's stored.
+        // store.messageBin can be async so updating the index first may
+        // result in a wrong cpIndex
+        nThen((waitFor) => {
+            Env.store.messageBin(id, msgBin, waitFor(function (err) {
+                if (err) {
+                    waitFor.abort();
+                    Log.error("HK_STORE_MESSAGE_ERROR", err.message);
+
+                    // this error is critical, but there's not much we can do at the moment
+                    // proceed with more messages, but they'll probably fail too
+                    // at least you won't have a memory leak
+
+                    // TODO make it possible to respond to clients with errors so they know
+                    // their message wasn't stored
+                    return void next();
+                }
+            }));
+        }).nThen((waitFor) => {
+            getIndex(Env, id, waitFor((err, index) => {
+                if (err) {
+                    Log.warn("HK_STORE_MESSAGE_INDEX", err.stack);
+                    // non-critical, we'll be able to get the channel index later
+                    return void next();
+                }
+                if (typeof (index.line) === "number") { index.line++; }
+                if (isCp) {
+                    index.cpIndex = sliceCpIndex(index.cpIndex, index.line || 0);
+                    // forget the offsets of messages which predate the oldest
+                    // checkpoint we still care about
+                    if (index.cpIndex.length) {
+                        for (let k in index.offsetByHash) {
+                            if (index.offsetByHash[k] < index.cpIndex[0].offset) {
+                                delete index.offsetByHash[k];
+                            }
+                        }
+                    }
+                    index.cpIndex.push({
+                        offset: index.size,
+                        line: ((index.line || 0) + 1)
+                    });
+                }
+                if (optionalMessageHash) { index.offsetByHash[optionalMessageHash] = index.size; }
+                index.size += msgBin.length;
+
+                // handle the next element in the queue
+                next();
+            }));
+        });
+    });
+};
+
+
+/* getHistoryOffset
+    returns a number representing the byte offset from the start of the log
+    for whatever history you're seeking.
+
+    query by providing a 'lastKnownHash',
+    which is really just a string of the first 64 characters of an encrypted message.
+    OR by -1 which indicates that we want the full history (byte offset 0)
+    OR nothing, which indicates that you want whatever messages the historyKeeper deems relevant
+    (typically the last few checkpoints)
+
+    this function embeds a lot of the history keeper's logic:
+
+    0. if you passed -1 as the lastKnownHash it means you want the complete history
+        * I'm not sure why you'd need to call this function if you know it will return 0 in this case...
+        * it has a side-effect of filling the index cache if it's empty
+    1.
+    1. if you provided a lastKnownHash and that message does not exist in the history:
+        * either the client has made a mistake or the history they knew about no longer exists
+        * call back with EINVAL
+    2. if you did not provide a lastKnownHash
+        * and there are fewer than two checkpoints:
+            * return 0 (read from the start of the file)
+        * and there are two or more checkpoints:
+            * return the offset of the earliest checkpoint which 'sliceCpIndex' considers relevant
+    3. if you did provide a lastKnownHash
+        * read through the log until you find the hash that you're looking for
+        * call back with either the byte offset of the message that you found OR
+        * -1 if you didn't find it
+
+*/
+const getHistoryOffset = (Env, channelName, lastKnownHash, _cb) => {
+    const cb = Util.once(Util.mkAsync(_cb));
+    const store = Env.store;
+    const Log = Env.Log;
+
+    // lastKnownHash === -1 means we want the complete history
+    if (lastKnownHash === -1) { return void cb(null, 0); }
+    let offset = -1;
+    nThen((waitFor) => {
+        getIndex(Env, channelName, waitFor((err, index) => {
+            if (err) { waitFor.abort(); return void cb(err); }
+
+            // check if the "hash" the client is requesting exists in the index
+            const lkh = index.offsetByHash[lastKnownHash];
+            // we evict old hashes from the index as new checkpoints are discovered.
+            // if someone connects and asks for a hash that is no longer relevant,
+            // we tell them it's an invalid request. This is because of the semantics of "GET_HISTORY"
+            // which is only ever used when connecting or reconnecting in typical uses of history...
+            // this assumption should hold for uses by chainpad, but perhaps not for other use cases.
+            // EXCEPT: other cases don't use checkpoints!
+            // clients that are told that their request is invalid should just make another request
+            // without specifying the hash, and just trust the server to give them the relevant data.
+            // QUESTION: does this mean mailboxes are causing the server to store too much stuff in memory?
+            if (lastKnownHash && typeof(lkh) !== "number") {
+                waitFor.abort();
+                return void cb(new Error('EINVAL'));
+            }
+
+            // no lastKnownHash: send history since the last two checkpoints
+            if (!lastKnownHash) {
+                waitFor.abort();
+                // fewer than 2 checkpoints in the history: return everything
+                if (index.cpIndex.length < 2) { return void cb(null, 0); }
+                // otherwise return the offset of the second-to-last checkpoint
+                return void cb(null, index.cpIndex[0].offset);
+                /* LATER...
+                    in practice, two checkpoints can be very close together
+                    we have measures to avoid duplicate checkpoints, but editors
+                    can produce nearby checkpoints which are slightly different,
+                    and slip past these protections. To be really careful, we can
+                    seek past nearby checkpoints by some number of patches so as
+                    to ensure that all editors have sufficient knowledge of history
+                    to reconcile their differences. */
+            }
+
+            offset = lkh;
+        }));
+    }).nThen((waitFor) => {
+        // if the index already gave us an offset then we can skip the linear scan below.
+        // otherwise 'offset' is still -1 and we search the log for the hash directly.
+        if (offset !== -1) { return; }
+
+        // FIXME maybe we don't need this fallback anymore?
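+        // each msgObj yielded by readMessagesBin below is assumed to have the shape
+        //   { buff: <Buffer containing one log line>, offset: <byte offset of that line> }
+        // so recording a match amounts to (sketch):
+        //   if (getHash(msg[4], Log) === lastKnownHash) { offset = msgObj.offset; abort(); }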
+        // scan from the beginning of the log, looking for the requested hash
+        store.readMessagesBin(channelName, 0, (msgObj, readMore, abort) => {
+            // tryParse returns a parsed message or undefined
+            const msg = tryParse(Env, msgObj.buff.toString('utf8'));
+            // if it was undefined then move on to the next message
+            if (typeof msg === "undefined") { return readMore(); }
+            if (typeof(msg[4]) !== 'string' || lastKnownHash !== getHash(msg[4], Log)) {
+                return void readMore();
+            }
+            offset = msgObj.offset;
+            abort();
+        }, waitFor(function (err) {
+            if (err) { waitFor.abort(); return void cb(err); }
+        }));
+    }).nThen(() => {
+        cb(null, offset);
+    });
+};
+
+/*  getHistoryAsync
+    * finds the appropriate byte offset from which to begin reading using 'getHistoryOffset'
+    * streams through the rest of the messages, safely parsing them and returning the parsed content to the handler
+    * calls back when it has reached the end of the log
+
+    Used by:
+    * GET_HISTORY
+
+*/
+const getHistoryAsync = (Env, channelName, lastKnownHash, beforeHash, handler, cb) => {
+    const store = Env.store;
+
+    let offset = -1;
+    nThen((waitFor) => {
+        getHistoryOffset(Env, channelName, lastKnownHash, waitFor((err, os) => {
+            if (err) {
+                waitFor.abort();
+                return void cb(err);
+            }
+            offset = os;
+        }));
+    }).nThen((waitFor) => {
+        if (offset === -1) { return void cb(new Error("could not find offset")); }
+        const start = (beforeHash) ? 0 : offset;
+        store.readMessagesBin(channelName, start, (msgObj, readMore, abort) => {
+            if (beforeHash && msgObj.offset >= offset) { return void abort(); }
+            var parsed = tryParse(Env, msgObj.buff.toString('utf8'));
+            if (!parsed) { return void readMore(); }
+            handler(parsed, readMore);
+        }, waitFor(function (err) {
+            return void cb(err);
+        }));
+    });
+};
+
+/*  getOlderHistory
+    * allows clients to query for all messages until a known hash is read
+    * stores all messages in history as they are read
+        * can therefore be very expensive for memory
+        * should probably be converted to a streaming interface
+
+    Used by:
+    * GET_HISTORY_RANGE
+*/
+const getOlderHistory = function (Env, channelName, oldestKnownHash, cb) {
+    const store = Env.store;
+    const Log = Env.Log;
+    var messageBuffer = [];
+    var found = false;
+    store.getMessages(channelName, function (msgStr) {
+        if (found) { return; }
+
+        let parsed = tryParse(Env, msgStr);
+        if (typeof parsed === "undefined") { return; }
+
+        // identify classic metadata messages by their inclusion of a channel,
+        // and don't send metadata, since:
+        // 1. the user won't be interested in it
+        // 2. this metadata is potentially incomplete/incorrect
+        if (isMetadataMessage(parsed)) { return; }
+
+        var content = parsed[4];
+        if (typeof(content) !== 'string') { return; }
+
+        var hash = getHash(content, Log);
+        if (hash === oldestKnownHash) {
+            found = true;
+        }
+        messageBuffer.push(parsed);
+    }, function (err) {
+        if (err) {
+            Log.error("HK_GET_OLDER_HISTORY", err);
+        }
+        cb(messageBuffer);
+    });
+};
+
+const handleRPC = function (Env, Server, seq, userId, parsed) {
+    const HISTORY_KEEPER_ID = Env.id;
+
+    /* RPC Calls... */
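+    // Sketch of the assumed wire shape (illustrative, not normative):
+    //   parsed   ~ [txid, [signature, publicKey, cookie, 'COMMAND', ...args]]
+    //   rpc_call ~ parsed.slice(1), i.e. the signed payload without the txid
+    // replies echo parsed[0] back so the client can match them to its request.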
+    var rpc_call = parsed.slice(1);
+
+    Server.send(userId, [seq, 'ACK']);
+    try {
+        // slice off the sequence number and pass in the rest of the message
+        Env.rpc(Server, userId, rpc_call, function (err, output) {
+            if (err) {
+                Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify([parsed[0], 'ERROR', err])]);
+                return;
+            }
+            Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify([parsed[0]].concat(output))]);
+        });
+    } catch (e) {
+        // if anything throws in the middle, send an error
+        Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify([parsed[0], 'ERROR', 'SERVER_ERROR'])]);
+    }
+};
+
+/*
+    This is called when a user tries to connect to a channel that doesn't exist.
+    We initialize that channel by writing the metadata supplied by the user to its log.
+    If the provided metadata has an expire time then we also create a task to expire it.
+*/
+const handleFirstMessage = function (Env, channelName, metadata) {
+    Env.store.writeMetadata(channelName, JSON.stringify(metadata), function (err) {
+        if (err) {
+            // FIXME tell the user that there was a channel error?
+            return void Env.Log.error('HK_WRITE_METADATA', {
+                channel: channelName,
+                error: err,
+            });
+        }
+    });
+
+    // write tasks
+    if (metadata.expire && typeof(metadata.expire) === 'number') {
+        // the fun part...
+        // the user has said they want this pad to expire at some point
+        Env.tasks.write(metadata.expire, "EXPIRE", [ channelName ], function (err) {
+            if (err) {
+                // if there is an error, we don't want to crash the whole server...
+                // just log it, and if there's a problem you'll be able to fix it
+                // at a later date with the provided information
+                Env.Log.error('HK_CREATE_EXPIRE_TASK', err);
+                Env.Log.info('HK_INVALID_EXPIRE_TASK', JSON.stringify([metadata.expire, 'EXPIRE', channelName]));
+            }
+        });
+    }
+};
+
+const handleGetHistory = function (Env, Server, seq, userId, parsed) {
+    const metadata_cache = Env.metadata_cache;
+    const HISTORY_KEEPER_ID = Env.id;
+    const Log = Env.Log;
+
+    // parsed[1] is the channel id
+    // parsed[2] is a validation key or an object containing metadata (optional)
+    // parsed[3] is the last known hash (optional)
+
+    Server.send(userId, [seq, 'ACK']);
+    var channelName = parsed[1];
+    var config = parsed[2];
+    var metadata = {};
+    var lastKnownHash;
+    var txid;
+
+    // clients can optionally pass a map of attributes
+    // if the channel already exists this map will be ignored
+    // otherwise it will be stored as the initial metadata state for the channel
+    if (config && typeof config === "object" && !Array.isArray(parsed[2])) {
+        lastKnownHash = config.lastKnownHash;
+        metadata = config.metadata || {};
+        txid = config.txid;
+        if (metadata.expire) {
+            metadata.expire = +metadata.expire * 1000 + (+new Date());
+        }
+    }
+    metadata.channel = channelName;
+    metadata.created = +new Date();
+
+    // if the user sends us an invalid key, we won't be able to validate their messages
+    // so they'll never get written to the log anyway. Let's just drop their message
+    // on the floor instead of doing a bunch of extra work
+    // TODO send them an error message so they know something is wrong
+    if (metadata.validateKey && !isValidValidateKeyString(metadata.validateKey)) {
+        return void Log.error('HK_INVALID_KEY', metadata.validateKey);
+    }
+
+    nThen(function (waitFor) {
+        var w = waitFor();
+        /*  fetch the channel's metadata.
+            use it to check if the channel has expired.
+            send it to the client if it exists. */
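+        // (sketch) when a metadata log exists, its computed state is assumed
+        // to be a map along the lines of:
+        //   { channel: '<id>', validateKey: '<base64>', owners: ['<base64>'], expire: <ms timestamp> }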
+        getMetadata(Env, channelName, waitFor(function (err, metadata) {
+            if (err) {
+                Env.Log.error('HK_GET_HISTORY_METADATA', {
+                    channel: channelName,
+                    error: err,
+                });
+                return void w();
+            }
+            // it's possible that the channel doesn't have metadata.
+            // in that case there's no point in checking whether the channel expired
+            // or in trying to send metadata, so just skip this block
+            if (!metadata || !metadata.channel) { return void w(); }
+            // otherwise there is already a metadata log, so use it instead
+            // of whatever the user supplied
+
+            // And then check if the channel is expired. If it is, send the error and abort
+            // FIXME this is hard to read because 'checkExpired' has side effects
+            if (checkExpired(Env, Server, channelName)) { return void waitFor.abort(); }
+
+            // always send metadata with GET_HISTORY requests
+            Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(metadata)], w);
+        }));
+    }).nThen(() => {
+        let msgCount = 0;
+
+        // TODO compute lastKnownHash in a manner such that it will always skip past the metadata line?
+        getHistoryAsync(Env, channelName, lastKnownHash, false, (msg, readMore) => {
+            msgCount++;
+            // avoid sending the metadata message a second time
+            if (isMetadataMessage(msg) && metadata_cache[channelName]) { return readMore(); }
+            if (txid) { msg[0] = txid; }
+            Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(msg)], readMore);
+        }, (err) => {
+            if (err && err.code !== 'ENOENT') {
+                if (err.message !== 'EINVAL') { Log.error("HK_GET_HISTORY", err); }
+                const parsedMsg = { error: err.message, channel: channelName, txid: txid };
+                Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(parsedMsg)]);
+                return;
+            }
+
+            if (msgCount === 0 && !metadata_cache[channelName] && Server.channelContainsUser(channelName, userId)) {
+                handleFirstMessage(Env, channelName, metadata);
+                Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(metadata)]);
+            }
+
+            // End of history message:
+            let parsedMsg = { state: 1, channel: channelName, txid: txid };
+
+            Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(parsedMsg)]);
+        });
+    });
+};
+
+const handleGetHistoryRange = function (Env, Server, seq, userId, parsed) {
+    var channelName = parsed[1];
+    var map = parsed[2];
+    const HISTORY_KEEPER_ID = Env.id;
+
+    if (!(map && typeof(map) === 'object')) {
+        return void Server.send(userId, [seq, 'ERROR', 'INVALID_ARGS', HISTORY_KEEPER_ID]);
+    }
+
+    var oldestKnownHash = map.from;
+    var desiredMessages = map.count;
+    var desiredCheckpoint = map.cpCount;
+    var txid = map.txid;
+    if (typeof(desiredMessages) !== 'number' && typeof(desiredCheckpoint) !== 'number') {
+        return void Server.send(userId, [seq, 'ERROR', 'UNSPECIFIED_COUNT', HISTORY_KEEPER_ID]);
+    }
+
+    if (!txid) {
+        return void Server.send(userId, [seq, 'ERROR', 'NO_TXID', HISTORY_KEEPER_ID]);
+    }
+
+    Server.send(userId, [seq, 'ACK']);
+    return void getOlderHistory(Env, channelName, oldestKnownHash, function (messages) {
+        var toSend = [];
+        if (typeof(desiredMessages) === "number") {
+            toSend = messages.slice(-desiredMessages);
+        } else {
+            let cpCount = 0;
+            for (var i = messages.length - 1; i >= 0; i--) {
+                if (/^cp\|/.test(messages[i][4]) && i !== (messages.length - 1)) {
+                    cpCount++;
+                }
+                toSend.unshift(messages[i]);
+                if (cpCount >= desiredCheckpoint) { break; }
+            }
+        }
+        toSend.forEach(function (msg) {
+            Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId,
+                JSON.stringify(['HISTORY_RANGE', txid, msg])]);
+        });
+
+        Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId,
+            JSON.stringify(['HISTORY_RANGE_END', txid, channelName])
+        ]);
+    });
+};
+
+const handleGetFullHistory = function (Env, Server, seq, userId, parsed) {
+    const HISTORY_KEEPER_ID = Env.id;
+    const Log = Env.Log;
+
+    // parsed[1] is the channel id
+    // parsed[2] is a validation key (optional)
+    // parsed[3] is the last known hash (optional)
+
+    Server.send(userId, [seq, 'ACK']);
+
+    // FIXME should we send metadata here too?
+    // none of the clientside code which uses this API needs metadata, but it won't hurt to send it (2019-08-22)
+    return void getHistoryAsync(Env, parsed[1], -1, false, (msg, readMore) => {
+        Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(['FULL_HISTORY', msg])], readMore);
+    }, (err) => {
+        let parsedMsg = ['FULL_HISTORY_END', parsed[1]];
+        if (err) {
+            Log.error('HK_GET_FULL_HISTORY', err.stack);
+            parsedMsg = ['ERROR', parsed[1], err.message];
+        }
+        Server.send(userId, [0, HISTORY_KEEPER_ID, 'MSG', userId, JSON.stringify(parsedMsg)]);
+    });
+};
+
+const directMessageCommands = {
+    GET_HISTORY: handleGetHistory,
+    GET_HISTORY_RANGE: handleGetHistoryRange,
+    GET_FULL_HISTORY: handleGetFullHistory,
+};
+
+/*  onDirectMessage
+    * exported for use by the netflux-server
+    * parses and handles all direct messages directed to the history keeper
+        * checks if the channel is expired and executes all the associated side-effects
+        * routes queries to the appropriate handlers
+*/
+HK.onDirectMessage = function (Env, Server, seq, userId, json) {
+    const Log = Env.Log;
+    const HISTORY_KEEPER_ID = Env.id;
+    Log.silly('HK_MESSAGE', json);
+
+    let parsed;
+    try {
+        parsed = JSON.parse(json[2]);
+    } catch (err) {
+        Log.error("HK_PARSE_CLIENT_MESSAGE", json);
+        return;
+    }
+
+    var first = parsed[0];
+
+    if (typeof(directMessageCommands[first]) !== 'function') {
+        // it's either an unsupported command or an RPC call
+        // either way, RPC has it covered
+        return void handleRPC(Env, Server, seq, userId, parsed);
+    }
+
+    // otherwise it's some kind of history retrieval command...
+    // go grab its metadata, because unfortunately people can ask for history
+    // whether or not they have joined the channel, so we can't rely on JOIN restriction
+    // to stop people from loading history they shouldn't see.
+    var channelName = parsed[1];
+    nThen(function (w) {
+        getMetadata(Env, channelName, w(function (err, metadata) {
+            if (err) {
+                // stream errors?
+                // we should log these, but if we can't load metadata
+                // then it's probably not restricted or expired
+                // it's not like anything else will recover from this anyway
+                return;
+            }
+
+            // likewise, we can't do anything more here if there's no metadata
+            // jump to the next block
+            if (!metadata) { return; }
+
+            // If the requested history is for an expired channel, abort
+            // checkExpired has side effects and will disconnect users for you...
+            if (checkExpired(Env, Server, parsed[1])) {
+                // if the channel is expired just abort.
+                w.abort();
+                return;
+            }
+
+            // jump to handling the command if there's no restriction...
+            if (!metadata.restricted) { return; }
+
+            // check if the user is in the allow list...
+            const allowed = HK.listAllowedUsers(metadata);
+            const session = HK.getNetfluxSession(Env, userId);
+
+            if (HK.isUserSessionAllowed(allowed, session)) {
+                return;
+            }
+
+/*  Anyone in the userlist that isn't in the allow list should have already
+    been kicked out of the channel.
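+
+    (Illustration of the assumed failure mode: a GET_HISTORY request for a
+    restricted channel from a session that is neither an owner nor in
+    metadata.allowed receives [seq, 'ERROR', 'ERESTRICTED', HISTORY_KEEPER_ID],
+    and is expected to retry after authenticating with an allowed key.)
+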
+    Likewise, disallowed users should not
+    be able to add themselves to the userlist because JOIN commands respect
+    access control settings. The error that is sent below protects against
+    the remaining case, in which users try to get history without having
+    joined the channel. Normally we'd send the allow list to tell them the
+    key with which they should authenticate, but since we don't use this
+    behaviour, I'm doing the easy thing and just telling them to GO AWAY.
+
+    We can implement the more advanced behaviour later if it turns out that
+    we need it. This check guards against all kinds of history access:
+    GET_HISTORY, GET_HISTORY_RANGE, GET_FULL_HISTORY.
+*/
+            w.abort();
+            return void Server.send(userId, [
+                seq,
+                'ERROR',
+                'ERESTRICTED',
+                HISTORY_KEEPER_ID
+            ]);
+        }));
+    }).nThen(function () {
+        // run the appropriate command from the map
+        directMessageCommands[first](Env, Server, seq, userId, parsed);
+    });
+};
+
+/*  onChannelMessage
+    Determine what we should store when a message is broadcast to a channel
+
+    * ignores ephemeral channels
+    * ignores messages sent to expired channels
+    * rejects duplicated checkpoints
+    * validates messages to channels that have validation keys
+    * caches the id of the last saved checkpoint
+    * adds timestamps to incoming messages
+    * writes messages to the store
+*/
+HK.onChannelMessage = function (Env, Server, channel, msgStruct) {
+    const Log = Env.Log;
+
+    // TODO our usage of 'channel' here looks prone to errors
+    // we only use it for its 'id', but it can contain other stuff
+    // also, we're using this RPC from both the RPC and Netflux-server
+    // we should probably just change this to expect a channel id directly
+
+    // don't store messages if the channel id indicates that it's an ephemeral channel
+    if (!channel.id || channel.id.length === EPHEMERAL_CHANNEL_LENGTH) { return; }
+
+    const isCp = /^cp\|/.test(msgStruct[4]);
+    let id;
+    if (isCp) {
+        // id becomes either null or an array of results...
+        id = CHECKPOINT_PATTERN.exec(msgStruct[4]);
+        if (Array.isArray(id) && id[2] && id[2] === channel.lastSavedCp) {
+            // Reject duplicate checkpoints
+            return;
+        }
+    }
+
+    let metadata;
+    nThen(function (w) {
+        getMetadata(Env, channel.id, w(function (err, _metadata) {
+            // if there's no channel metadata then it can't be an expiring channel
+            // nor can we possibly validate it
+            if (!_metadata) { return; }
+            metadata = _metadata;
+
+            // don't write messages to expired channels
+            if (checkExpired(Env, Server, channel)) { return void w.abort(); }
+        }));
+    }).nThen(function (w) {
+        // if there's no validateKey present skip to the next block
+        if (!(metadata && metadata.validateKey)) { return; }
+
+        // trim the checkpoint indicator off the message if it's present
+        let signedMsg = (isCp) ? msgStruct[4].replace(CHECKPOINT_PATTERN, '') : msgStruct[4];
+        // convert the message from a base64 string into a Uint8Array
+
+        // FIXME this can fail and the client won't notice
+        signedMsg = Nacl.util.decodeBase64(signedMsg);
+
+        // FIXME this can blow up
+        // TODO check that that won't cause any problems other than not being able to append...
+        const validateKey = Nacl.util.decodeBase64(metadata.validateKey);
+        // validate the message
+        const validated = Nacl.sign.open(signedMsg, validateKey);
+        if (!validated) {
+            // don't go any further if the message fails validation
+            w.abort();
+            Log.info("HK_SIGNED_MESSAGE_REJECTED", 'Channel '+channel.id);
+            return;
+        }
+    }).nThen(function () {
+        // do checkpoint stuff...
+
+        // 1.
get the checkpoint id + // 2. reject duplicate checkpoints + + if (isCp) { + // if the message is a checkpoint we will have already validated + // that it isn't a duplicate. remember its id so that we can + // repeat this process for the next incoming checkpoint + + // WARNING: the fact that we only check the most recent checkpoints + // is a potential source of bugs if one editor has high latency and + // pushes a duplicate of an earlier checkpoint than the latest which + // has been pushed by editors with low latency + // FIXME + if (Array.isArray(id) && id[2]) { + // Store new checkpoint hash + channel.lastSavedCp = id[2]; + } + } + + // add the time to the message + msgStruct.push(now()); + + // storeMessage + storeMessage(Env, channel, JSON.stringify(msgStruct), isCp, getHash(msgStruct[4], Log)); + }); +}; + + diff --git a/lib/load-config.js b/lib/load-config.js index 0756c2df4..4d6fa894f 100644 --- a/lib/load-config.js +++ b/lib/load-config.js @@ -1,7 +1,7 @@ /* jslint node: true */ "use strict"; var config; -var configPath = process.env.CRYPTPAD_CONFIG || "../config/config"; +var configPath = process.env.CRYPTPAD_CONFIG || "../config/config.js"; try { config = require(configPath); if (config.adminEmail === 'i.did.not.read.my.config@cryptpad.fr') { @@ -18,5 +18,29 @@ try { } config = require("../config/config.example"); } + +var isPositiveNumber = function (n) { + return (!isNaN(n) && n >= 0); +}; + +if (!isPositiveNumber(config.inactiveTime)) { + config.inactiveTime = 90; +} +if (!isPositiveNumber(config.archiveRetentionTime)) { + config.archiveRetentionTime = 90; +} +if (!isPositiveNumber(config.maxUploadSize)) { + config.maxUploadSize = 20 * 1024 * 1024; +} +if (!isPositiveNumber(config.defaultStorageLimit)) { + config.defaultStorageLimit = 50 * 1024 * 1024; +} + +// premiumUploadSize is worthless if it isn't a valid positive number +// or if it's less than the default upload size +if (!isPositiveNumber(config.premiumUploadSize) || config.premiumUploadSize < config.defaultStorageLimit) { + delete config.premiumUploadSize; +} + module.exports = config; diff --git a/lib/log.js b/lib/log.js index 7de6badb8..756da8734 100644 --- a/lib/log.js +++ b/lib/log.js @@ -1,5 +1,5 @@ /*jshint esversion: 6 */ -var Store = require("../storage/file"); +var Store = require("./storage/file"); var Logger = module.exports; diff --git a/lib/metadata.js b/lib/metadata.js index de40043af..97f2e484a 100644 --- a/lib/metadata.js +++ b/lib/metadata.js @@ -1,24 +1,170 @@ var Meta = module.exports; -var deduplicate = require("./deduplicate"); +var deduplicate = require("./common-util").deduplicateString; -/* Metadata fields: +/* Metadata fields and the commands that can modify them + +we assume that these commands can only be performed +by owners or in some cases pending owners. Thus +the owners field is guaranteed to exist. 
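+
+Each command is stored as one line of the metadata log, shaped like
+[COMMAND, args, timestamp]. For example:
+
+    ["ADD_OWNERS", ["7eEqelGso3EBr5jHlei6av4r9w2B9XZiGGwA1EgZ-5I="], 1561623438989]
+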
 * channel
 * validateKey
 * owners
     * ADD_OWNERS
     * RM_OWNERS
+    * RESET_OWNERS
+ * pending_owners
+    * ADD_PENDING_OWNERS
+    * RM_PENDING_OWNERS
 * expire
+    * UPDATE_EXPIRATION (NOT_IMPLEMENTED)
+ * restricted
+    * RESTRICT_ACCESS
+ * allowed
+    * ADD_ALLOWED
+    * RM_ALLOWED
+    * RESET_ALLOWED
+    * ADD_OWNERS
+    * RESET_OWNERS
+ * mailbox
+    * ADD_MAILBOX
+    * RM_MAILBOX
 */

 var commands = {};

-var isValidOwner = function (owner) {
+var isValidPublicKey = function (owner) {
     return typeof(owner) === 'string' && owner.length === 44;
 };

+// isValidPublicKey is a better indication of what the above function does
+// I'm preserving this function name in case we ever want to expand its
+// criteria at a later time...
+var isValidOwner = isValidPublicKey;
+
+// ["RESTRICT_ACCESS", [true], 1561623438989]
+// ["RESTRICT_ACCESS", [false], 1561623438989]
+commands.RESTRICT_ACCESS = function (meta, args) {
+    if (!Array.isArray(args) || typeof(args[0]) !== 'boolean') {
+        throw new Error('INVALID_STATE');
+    }
+
+    var bool = args[0];
+
+    // reject the proposed command if there is no change in state
+    if (meta.restricted === bool) { return false; }
+
+    // apply the new state
+    meta.restricted = args[0];
+
+    // if you're disabling access restrictions then you can assume
+    // that there is nothing more to do. Leave the existing list as-is
+    if (!bool) { return true; }
+
+    // you're all set if an allow list already exists
+    if (Array.isArray(meta.allowed)) { return true; }
+
+    // otherwise define it
+    meta.allowed = [];
+
+    return true;
+};
+
+// ["ADD_ALLOWED", ["7eEqelGso3EBr5jHlei6av4r9w2B9XZiGGwA1EgZ-5I=", ...], 1561623438989]
+commands.ADD_ALLOWED = function (meta, args) {
+    if (!Array.isArray(args)) {
+        throw new Error("INVALID_ARGS");
+    }
+
+    var allowed = meta.allowed || [];
+
+    var changed = false;
+    args.forEach(function (arg) {
+        // don't add invalid public keys
+        if (!isValidPublicKey(arg)) { return; }
+        // don't add owners to the allow list
+        if (meta.owners.indexOf(arg) >= 0) { return; }
+        // don't duplicate entries in the allow list
+        if (allowed.indexOf(arg) >= 0) { return; }
+        allowed.push(arg);
+        changed = true;
+    });
+
+    if (changed) {
+        meta.allowed = meta.allowed || allowed;
+    }
+
+    return changed;
+};
+
+// ["RM_ALLOWED", ["7eEqelGso3EBr5jHlei6av4r9w2B9XZiGGwA1EgZ-5I=", ...], 1561623438989]
+commands.RM_ALLOWED = function (meta, args) {
+    if (!Array.isArray(args)) {
+        throw new Error("INVALID_ARGS");
+    }
+
+    // there may not be anything to remove
+    if (!meta.allowed) { return false; }
+
+    var changed = false;
+    args.forEach(function (arg) {
+        var index = meta.allowed.indexOf(arg);
+        if (index < 0) { return; }
+        meta.allowed.splice(index, 1);
+        changed = true;
+    });
+
+    return changed;
+};
+
+var arrayHasChanged = function (A, B) {
+    var changed;
+    A.some(function (a) {
+        if (B.indexOf(a) < 0) { return (changed = true); }
+    });
+    if (changed) { return true; }
+    B.some(function (b) {
+        if (A.indexOf(b) < 0) { return (changed = true); }
+    });
+    return changed;
+};
+
+var filterInPlace = function (A, f) {
+    for (var i = A.length - 1; i >= 0; i--) {
+        if (f(A[i], i, A)) { A.splice(i, 1); }
+    }
+};
+
+// ["RESET_ALLOWED", ["7eEqelGso3EBr5jHlei6av4r9w2B9XZiGGwA1EgZ-5I=", ...], 1561623438989]
+commands.RESET_ALLOWED = function (meta, args) {
+    if (!Array.isArray(args)) { throw new Error("INVALID_ARGS"); }
+
+    var updated = args.filter(function (arg) {
+        // don't allow invalid public keys
+        if (!isValidPublicKey(arg)) { return false; }
+        // don't ever add owners to the allow list
+        if (meta.owners.indexOf(arg) >= 0) { return false; }
+        return true;
+    });
+
+    // this is strictly an optimization...
+    // a change in length is a clear indicator of a functional change
+    if (meta.allowed && meta.allowed.length !== updated.length) {
+        meta.allowed = updated;
+        return true;
+    }
+
+    // otherwise we must check that the arrays contain distinct elements
+    // if there is no functional change, then return false
+    if (!arrayHasChanged(meta.allowed, updated)) { return false; }
+
+    // otherwise overwrite the in-memory data and indicate that there was a change
+    meta.allowed = updated;
+    return true;
+};
+
 // ["ADD_OWNERS", ["7eEqelGso3EBr5jHlei6av4r9w2B9XZiGGwA1EgZ-5I="], 1561623438989]
 commands.ADD_OWNERS = function (meta, args) {
     // bail out if args isn't an array
@@ -40,6 +186,13 @@ commands.ADD_OWNERS = function (meta, args) {
         changed = true;
     });

+    if (changed && Array.isArray(meta.allowed)) {
+        // make sure owners are not included in the allow list
+        filterInPlace(meta.allowed, function (member) {
+            return meta.owners.indexOf(member) !== -1;
+        });
+    }
+
     return changed;
 };

@@ -71,6 +224,10 @@ commands.RM_OWNERS = function (meta, args) {
         changed = true;
     });

+    if (meta.owners.length === 0 && meta.restricted) {
+        meta.restricted = false;
+    }
+
     return changed;
 };

@@ -141,6 +298,18 @@ commands.RESET_OWNERS = function (meta, args) {
     // overwrite the existing owners with the new one
     meta.owners = deduplicate(args.filter(isValidOwner));
+
+    if (Array.isArray(meta.allowed)) {
+        // make sure owners are not included in the allow list
+        filterInPlace(meta.allowed, function (member) {
+            return meta.owners.indexOf(member) !== -1;
+        });
+    }
+
+    if (meta.owners.length === 0 && meta.restricted) {
+        meta.restricted = false;
+    }
+
     return true;
 };

@@ -178,6 +347,25 @@ commands.ADD_MAILBOX = function (meta, args) {
     return changed;
 };

+commands.RM_MAILBOX = function (meta, args) {
+    if (!Array.isArray(args)) { throw new Error("INVALID_ARGS"); }
+    // there may not be anything to remove
+    if (!meta.mailbox) { return false; }
+    if (typeof(meta.mailbox) === 'string' && args.length === 0) {
+        delete meta.mailbox;
+        return true;
+    }
+
+    var changed = false;
+    args.forEach(function (arg) {
+        if (typeof(meta.mailbox[arg]) === 'undefined') { return; }
+        delete meta.mailbox[arg];
+        changed = true;
+    });
+    return changed;
+};
+
 commands.UPDATE_EXPIRATION = function () {
     throw new Error("E_NOT_IMPLEMENTED");
 };
@@ -198,6 +386,7 @@ Meta.commands = Object.keys(commands);
 Meta.createLineHandler = function (ref, errorHandler) {
     ref.meta = {};
     ref.index = 0;
+    ref.logged = {};

     return function (err, line) {
         if (err) {
@@ -211,13 +400,20 @@ Meta.createLineHandler = function (ref, errorHandler) {
                 line: JSON.stringify(line),
             });
         }
+
+        // the case above is special, everything else should increment the index
+        var index = ref.index++;

         if (typeof(line) === 'undefined') { return; }
+
         if (Array.isArray(line)) {
             try {
                 handleCommand(ref.meta, line);
-                ref.index++;
             } catch (err2) {
+                // only log a given error code once, to avoid flooding the logs
+                var code = err2.message;
+                if (ref.logged[code]) { return; }
+
+                ref.logged[code] = true;
                 errorHandler("METADATA_COMMAND_ERR", {
                     error: err2.stack,
                     line: line,
@@ -226,8 +422,15 @@
             return;
         }

-        if (ref.index === 0 && typeof(line) === 'object') {
-            ref.index++;
+        // the first line of a channel is processed before the dedicated metadata log.
+        // it can contain a map, in which case it should be used as the initial state.
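+        // e.g. (hypothetical parsed lines, earliest first):
+        //   line 0: {"channel":"...", "owners":[...]}        initial map
+        //   line 1: {"channel":"...", "owners":[...], ...}   computed state (see below)
+        //   line 2: ["ADD_OWNERS", ["<public key>"], 1561623438989]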
+ // it's possible that a trim-history command was interrupted, in which case + // this first message might exist in parallel with the more recent metadata log + // which will contain the computed state of the previous metadata log + // which has since been archived. + // Thus, accept both the first and second lines you process as valid initial state + // preferring the second if it exists + if (index < 2 && line && typeof(line) === 'object') { // special case! ref.meta = line; return; @@ -235,7 +438,7 @@ Meta.createLineHandler = function (ref, errorHandler) { errorHandler("METADATA_HANDLER_WEIRDLINE", { line: line, - index: ref.index++, + index: index, }); }; }; diff --git a/lib/once.js b/lib/once.js deleted file mode 100644 index a851af259..000000000 --- a/lib/once.js +++ /dev/null @@ -1,7 +0,0 @@ -module.exports = function (f, g) { - return function () { - if (!f) { return; } - f.apply(this, Array.prototype.slice.call(arguments)); - f = g; - }; -}; diff --git a/lib/pins.js b/lib/pins.js index 23b1364a3..41e871446 100644 --- a/lib/pins.js +++ b/lib/pins.js @@ -2,6 +2,11 @@ var Pins = module.exports; +const Fs = require("fs"); +const Path = require("path"); +const Util = require("./common-util"); +const Plan = require("./plan"); + /* Accepts a reference to an object, and... either a string describing which log is being processed (backwards compatibility), or a function which will log the error with all relevant data @@ -22,7 +27,11 @@ var createLineHandler = Pins.createLineHandler = function (ref, errorHandler) { // make sure to get ref.pins as the result // it's a weird API but it's faster than unpinning manually var pins = ref.pins = {}; + ref.index = 0; + ref.latest = 0; // the latest message (timestamp in ms) + ref.surplus = 0; // how many lines exist behind a reset return function (line) { + ref.index++; if (!Boolean(line)) { return; } var l; @@ -36,10 +45,15 @@ var createLineHandler = Pins.createLineHandler = function (ref, errorHandler) { return void errorHandler('PIN_LINE_NOT_FORMAT_ERROR', l); } + if (typeof(l[2]) === 'number') { + ref.latest = l[2]; // date + } + switch (l[0]) { case 'RESET': { pins = ref.pins = {}; if (l[1] && l[1].length) { l[1].forEach((x) => { ref.pins[x] = 1; }); } + ref.surplus = ref.index; //jshint -W086 // fallthrough } @@ -72,5 +86,111 @@ Pins.calculateFromLog = function (pinFile, fileName) { return Object.keys(ref.pins); }; -// TODO refactor to include a streaming version for use in rpc.js as well +/* + pins/ + pins/A+/ + pins/A+/A+hyhrQLrgYixOomZYxpuEhwfiVzKk1bBp+arH-zbgo=.ndjson +*/ + +const getSafeKeyFromPath = function (path) { + return path.replace(/^.*\//, '').replace(/\.ndjson/, ''); +}; + +const addUserPinToState = Pins.addUserPinToState = function (state, safeKey, itemId) { + (state[itemId] = state[itemId] || {})[safeKey] = 1; +}; + +Pins.list = function (_done, config) { + // allow for a configurable pin store location + const pinPath = config.pinPath || './data/pins'; + + // allow for a configurable amount of parallelism + const plan = Plan(config.workers || 5); + + // run a supplied handler whenever you finish reading a log + // or noop if not supplied. 
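+    // e.g. (sketch of an assumed handler; 'ref' holds the parsed log's state):
+    //   var handler = function (ref, id, pinned) {
+    //       console.log(id, Object.keys(ref.pins).length, ref.latest);
+    //   };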
+    const handler = config.handler || function () {};
+
+    // use and mutate a supplied object for state if it's passed
+    const pinned = config.pinned || {};
+
+    var isDone = false;
+    // ensure that 'done' is only called once
+    // that it calls back asynchronously
+    // and that it sets 'isDone' to true, so that pending processes
+    // know to abort
+    const done = Util.once(Util.both(Util.mkAsync(_done), function () {
+        isDone = true;
+    }));
+    const errorHandler = function (label, info) {
+        console.log(label, info);
+    };
+
+    // TODO replace this with lib-readline?
+    const streamFile = function (path, cb) {
+        const id = getSafeKeyFromPath(path);
+
+        return void Fs.readFile(path, 'utf8', function (err, body) {
+            if (err) { return void cb(err); }
+            const ref = {};
+            const pinHandler = createLineHandler(ref, errorHandler);
+            var lines = body.split('\n');
+            lines.forEach(pinHandler);
+            handler(ref, id, pinned);
+            cb(void 0, ref);
+        });
+    };
+
+    const scanDirectory = function (path, cb) {
+        Fs.readdir(path, function (err, list) {
+            if (err) {
+                return void cb(err);
+            }
+            cb(void 0, list.map(function (item) {
+                return {
+                    path: Path.join(path, item),
+                    id: item.replace(/\.ndjson$/, ''),
+                };
+            }));
+        });
+    };
+
+    scanDirectory(pinPath, function (err, dirs) {
+        if (err) {
+            if (err.code === 'ENOENT') { return void done(void 0, {}); }
+            return void done(err);
+        }
+        dirs.forEach(function (dir) {
+            plan.job(1, function (next) {
+                if (isDone) { return void next(); }
+                scanDirectory(dir.path, function (nested_err, logs) {
+                    if (nested_err) {
+                        return void done(nested_err);
+                    }
+                    logs.forEach(function (log) {
+                        if (!/\.ndjson$/.test(log.path)) { return; }
+                        plan.job(0, function (next) {
+                            if (isDone) { return void next(); }
+                            streamFile(log.path, function (err, ref) {
+                                if (err) { return void done(err); }
+
+                                var set = ref.pins;
+                                for (var item in set) {
+                                    addUserPinToState(pinned, log.id, item);
+                                }
+                                next();
+                            });
+                        });
+                    });
+                    next();
+                });
+            });
+        });
+
+        plan.done(function () {
+            // err ?
+            done(void 0, pinned);
+        }).start();
+    });
+};
diff --git a/lib/plan.js b/lib/plan.js
new file mode 100644
index 000000000..a7dbb4ec8
--- /dev/null
+++ b/lib/plan.js
@@ -0,0 +1,235 @@
+/*
+
+There are many situations where we want to do lots of little jobs
+in parallel and with few constraints as to their ordering.
+
+One example is recursing over a bunch of directories and reading files.
+The naive way to do this is to recurse over all the subdirectories
+relative to a root while adding files to a list. Then to iterate over
+the files in that list. Unfortunately, this means holding the complete
+list of file paths in memory, which can't possibly scale as our database grows.
+
+A better way to do this is to recurse into one directory and
+iterate over its contents until there are no more, then to backtrack
+to the next directory and repeat until no more directories exist.
+This kind of thing is easy enough when you perform one task at a time
+and use synchronous code, but with multiple asynchronous tasks it's
+easy to introduce subtle bugs.
+
+This module is designed for these situations. It allows you to easily
+and efficiently schedule a large number of tasks with an associated
+degree of priority from 0 (highest priority) to Number.MAX_SAFE_INTEGER.
+
+Initialize your scheduler with a degree of parallelism, and start planning
+some initial jobs. Set it to run and it will keep going until all jobs are
+complete, at which point it will optionally execute a 'done' callback.
+
+Getting back to the original example:
+
+List the contents of the root directory, then plan subsequent jobs
+with a priority of 1 to recurse into subdirectories. The callback
+of each of these recursions can then plan higher priority tasks
+to actually process the contained files with a priority of 0.
+
+As long as there are more files scheduled it will continue to process
+them first. When there are no more files the scheduler will read
+the next directory and repopulate the list of files to process.
+This will repeat until everything is done.
+
+// load the module
+const Plan = require("./plan");
+
+// instantiate a scheduler with a parallelism of 5
+var plan = Plan(5)
+
+// plan the first job which schedules more jobs...
+.job(1, function (next) {
+    listRootDirectory(function (files) {
+        files.forEach(function (file) {
+            // highest priority, run as soon as there is a free worker
+            plan.job(0, function (next) {
+                processFile(file, function (result) {
+                    console.log(result);
+                    // don't forget to call next
+                    next();
+                });
+            });
+        });
+        next(); // call 'next' to free up one worker
+    });
+})
+// chain commands together if you want
+.done(function () {
+    console.log("DONE");
+})
+// it won't run unless you launch it
+.start();
+
+*/
+
+module.exports = function (max) {
+    var plan = {};
+    max = max || 5;
+
+    // finds an id that isn't in use in a particular map
+    // accepts an id in case you have one already chosen
+    // otherwise generates random new ids if one is not passed
+    // or if there is a collision
+    var uid = function (map, id) {
+        if (typeof(id) === 'undefined') {
+            id = Math.floor(Math.random() * Number.MAX_SAFE_INTEGER);
+        }
+        if (id && typeof(map[id]) === 'undefined') {
+            return id;
+        }
+        return uid(map);
+    };
+
+    // the queue of jobs is an array, which will be populated
+    // with maps for each level of priority
+    var jobs = [];
+
+    // the count of currently running jobs
+    var count = 0;
+
+    // a list of callbacks to be executed once everything is done
+    var completeHandlers = [];
+
+    // the recommended usage is to create a new scheduler for every job
+    // use it for internals in a scope, and let the garbage collector
+    // clean up when everything stops. This means you shouldn't
+    // go passing 'plan' around in a long-lived process!
+    var FINISHED = false;
+    var done = function () {
+        // 'done' gets called when there are no more jobs in the queue
+        // but other jobs might still be running...
+
+        // the count of running processes should never be less than zero
+        // because we guard against multiple callbacks
+        if (count < 0) { throw new Error("should never happen"); }
+        // greater than zero is definitely possible, it just means you aren't done yet
+        if (count !== 0) { return; }
+        // you will finish twice if you call 'start' a second time
+        // this behaviour isn't supported yet.
+        if (FINISHED) { throw new Error('finished twice'); }
+        FINISHED = true;
+        // execute all your 'done' callbacks
+        completeHandlers.forEach(function (f) { f(); });
+    };
+
+    var run;
+
+    // this 'next' is internal only.
+    // it iterates over all known jobs, running them until
+    // the scheduler achieves the desired amount of parallelism.
+    // If there are no more jobs it will call 'done'
+    // which will shortcircuit if there are still pending tasks.
+    // Whenever any task finishes it will return its lock and
+    // run as many new jobs as are allowed.
+ var next = function () { + // array.some skips over bare indexes in sparse arrays + var pending = jobs.some(function (bag /*, priority*/) { + if (!bag || typeof(bag) !== 'object') { return; } + // a bag is a map of jobs for any particular degree of priority + // iterate over jobs in the bag until you're out of 'workers' + for (var id in bag) { + // bail out if you hit max parallelism + if (count >= max) { return true; } + run(bag, id, next); + } + }); + // check whether you're done if you hit the end of the array + if (!pending) { done(); } + }; + + // and here's the part that actually handles jobs... + run = function (bag, id) { + // this is just a sanity check. + // there should only ever be jobs in each bag. + if (typeof(bag[id]) !== 'function') { + throw new Error("expected function"); + } + + // keep a local reference to the function + var f = bag[id]; + // remove it from the bag. + delete bag[id]; + // increment the count of running jobs + count++; + + // guard against it being called twice. + var called = false; + f(function () { + // watch out! it'll bite you. + // maybe this should just return? + // support that option for 'production' ? + if (called) { throw new Error("called twice"); } + // the code below is safe because we can't call back a second time + called = true; + + // decrement the count of running jobs... + count--; + + // and finally call next to replace this worker with more job(s) + next(); + }); + }; + + // this is exposed as API + plan.job = function (priority, cb) { + // you have to pass both the priority (a non-negative number) and an actual job + if (typeof(priority) !== 'number' || priority < 0) { throw new Error('expected a non-negative number'); } + // a job is an asynchronous function that takes a single parameter: + // a 'next' callback which will keep the whole thing going. + // forgetting to call 'next' means you'll never complete. + if (typeof(cb) !== 'function') { throw new Error('expected function'); } + + // initialize the specified priority level if it doesn't already exist + var bag = jobs[priority] = jobs[priority] || {}; + // choose a random id that isn't already in use for this priority level + var id = uid(bag); + + // add the job to this priority level's bag + // most (all?) javascript engines will append this job to the bottom + // of the map. Meaning when we iterate it will be run later than + // other jobs that were scheduled first, effectively making a FIFO queue. + // However, this is undefined behaviour and you shouldn't ever rely on it. + bag[id] = function (next) { + cb(next); + }; + // returning 'plan' lets us chain methods together. + return plan; + }; + + var started = false; + plan.start = function () { + // don't allow multiple starts + // even though it should work, it's simpler not to. + if (started) { return plan; } + // this seems to imply a 'stop' method + // but I don't need it, so I'm not implementing it now --ansuz + started = true; + + // start asynchronously, otherwise jobs will start running + // before you've had a chance to return 'plan', and weird things + // happen. + setTimeout(function () { + next(); + }); + return plan; + }; + + // you can pass any number of functions to be executed + // when all pending jobs are complete. + // We don't pass any arguments, so you need to handle return values + // yourself if you want them. + plan.done = function (f) { + if (typeof(f) !== 'function') { throw new Error('expected function'); } + completeHandlers.push(f); + return plan; + }; + + // That's all! 
I hope you had fun reading this! + return plan; +}; + diff --git a/lib/rpc.js b/lib/rpc.js new file mode 100644 index 000000000..241f77d41 --- /dev/null +++ b/lib/rpc.js @@ -0,0 +1,216 @@ +/*jshint esversion: 6 */ +const Util = require("./common-util"); + +const Core = require("./commands/core"); +const Admin = require("./commands/admin-rpc"); +const Pinning = require("./commands/pin-rpc"); +const Quota = require("./commands/quota"); +const Block = require("./commands/block"); +const Metadata = require("./commands/metadata"); +const Channel = require("./commands/channel"); +const Upload = require("./commands/upload"); +const HK = require("./hk-util"); + +var RPC = module.exports; + +const UNAUTHENTICATED_CALLS = { + GET_FILE_SIZE: Pinning.getFileSize, + GET_MULTIPLE_FILE_SIZE: Pinning.getMultipleFileSize, + GET_DELETED_PADS: Pinning.getDeletedPads, + IS_CHANNEL_PINNED: Pinning.isChannelPinned, + IS_NEW_CHANNEL: Channel.isNewChannel, + WRITE_PRIVATE_MESSAGE: Channel.writePrivateMessage, + GET_METADATA: Metadata.getMetadata, +}; + +var isUnauthenticateMessage = function (msg) { + return msg && msg.length === 2 && typeof(UNAUTHENTICATED_CALLS[msg[0]]) === 'function'; +}; + +var handleUnauthenticatedMessage = function (Env, msg, respond, Server, netfluxId) { + Env.Log.silly('LOG_RPC', msg[0]); + + var method = UNAUTHENTICATED_CALLS[msg[0]]; + method(Env, msg[1], function (err, value) { + if (err) { + Env.WARN(err, msg[1]); + return void respond(err); + } + respond(err, [null, value, null]); + }, Server, netfluxId); +}; + +const AUTHENTICATED_USER_TARGETED = { + RESET: Pinning.resetUserPins, + PIN: Pinning.pinChannel, + UNPIN: Pinning.unpinChannel, + CLEAR_OWNED_CHANNEL: Channel.clearOwnedChannel, + REMOVE_OWNED_CHANNEL: Channel.removeOwnedChannel, + TRIM_HISTORY: Channel.trimHistory, + UPLOAD_STATUS: Upload.status, + UPLOAD: Upload.upload, + UPLOAD_COMPLETE: Upload.complete, + UPLOAD_CANCEL: Upload.cancel, + OWNED_UPLOAD_COMPLETE: Upload.complete_owned, + WRITE_LOGIN_BLOCK: Block.writeLoginBlock, + REMOVE_LOGIN_BLOCK: Block.removeLoginBlock, + ADMIN: Admin.command, + SET_METADATA: Metadata.setMetadata, +}; + +const AUTHENTICATED_USER_SCOPED = { + GET_HASH: Pinning.getHash, + GET_TOTAL_SIZE: Pinning.getTotalSize, + UPDATE_LIMITS: Quota.getUpdatedLimit, + GET_LIMIT: Pinning.getLimit, + EXPIRE_SESSION: Core.expireSessionAsync, + REMOVE_PINS: Pinning.removePins, + TRIM_PINS: Pinning.trimPins, + COOKIE: Core.haveACookie, +}; + +var isAuthenticatedCall = function (call) { + if (call === 'UPLOAD') { return false; } + return typeof(AUTHENTICATED_USER_TARGETED[call] || AUTHENTICATED_USER_SCOPED[call]) === 'function'; +}; + +var handleAuthenticatedMessage = function (Env, unsafeKey, msg, respond, Server) { + /* If you have gotten this far, you have signed the message with the + public key which you provided. + */ + + var safeKey = Util.escapeKeyCharacters(unsafeKey); + + var Respond = function (e, value) { + var session = Env.Sessions[safeKey]; + var token = session? session.tokens.slice(-1)[0]: ''; + var cookie = Core.makeCookie(token).join('|'); + respond(e ? 
String(e) : e, [cookie].concat(typeof(value) !== 'undefined' ? value : []));
+    };
+
+    // discard the validated cookie from the message
+    msg.shift();
+    if (!msg.length) {
+        return void Respond('INVALID_MSG');
+    }
+
+    var TYPE = msg[0];
+
+    Env.Log.silly('LOG_RPC', TYPE);
+
+    if (typeof(AUTHENTICATED_USER_TARGETED[TYPE]) === 'function') {
+        return void AUTHENTICATED_USER_TARGETED[TYPE](Env, safeKey, msg[1], function (e, value) {
+            Env.WARN(e, value);
+            return void Respond(e, value);
+        }, Server);
+    }
+
+    if (typeof(AUTHENTICATED_USER_SCOPED[TYPE]) === 'function') {
+        return void AUTHENTICATED_USER_SCOPED[TYPE](Env, safeKey, function (e, value) {
+            if (e) {
+                Env.WARN(e, safeKey);
+                return void Respond(e);
+            }
+            Respond(e, value);
+        });
+    }
+
+    return void Respond('UNSUPPORTED_RPC_CALL', msg);
+};
+
+var rpc = function (Env, Server, userId, data, respond) {
+    if (!Array.isArray(data)) {
+        Env.Log.debug('INVALID_ARG_FORMAT', data);
+        return void respond('INVALID_ARG_FORMAT');
+    }
+
+    if (!data.length) {
+        return void respond("INSUFFICIENT_ARGS");
+    } else if (data.length !== 1) {
+        Env.Log.debug('UNEXPECTED_ARGUMENTS_LENGTH', data);
+    }
+
+    var msg = data[0].slice(0);
+
+    if (!Array.isArray(msg)) {
+        return void respond('INVALID_ARG_FORMAT');
+    }
+
+    if (isUnauthenticateMessage(msg)) {
+        return handleUnauthenticatedMessage(Env, msg, respond, Server, userId);
+    }
+
+    var signature = msg.shift();
+    var publicKey = msg.shift();
+
+    // make sure a user object is initialized in the cookie jar
+    var session;
+    if (publicKey) {
+        session = Core.getSession(Env.Sessions, publicKey);
+    } else {
+        Env.Log.debug("NO_PUBLIC_KEY_PROVIDED", publicKey);
+    }
+
+    var cookie = msg[0];
+    if (!Core.isValidCookie(Env.Sessions, publicKey, cookie)) {
+        // no cookie is fine if the RPC is to get a cookie
+        if (msg[1] !== 'COOKIE') {
+            return void respond('NO_COOKIE');
+        }
+    }
+
+    var serialized = JSON.stringify(msg);
+
+    if (!(serialized && typeof(publicKey) === 'string')) {
+        return void respond('INVALID_MESSAGE_OR_PUBLIC_KEY');
+    }
+
+    var command = msg[1];
+
+    if (command === 'UPLOAD') {
+        // UPLOAD is a special case that skips signature validation
+        // intentional fallthrough behaviour
+        return void handleAuthenticatedMessage(Env, publicKey, msg, respond, Server);
+    }
+    if (isAuthenticatedCall(command)) {
+        // check the signature on the message
+        // refuse the command if it doesn't validate
+        if (Core.checkSignature(Env, serialized, signature, publicKey) === true) {
+            HK.authenticateNetfluxSession(Env, userId, publicKey);
+            return void handleAuthenticatedMessage(Env, publicKey, msg, respond, Server);
+        }
+        return void respond("INVALID_SIGNATURE_OR_PUBLIC_KEY");
+    }
+    Env.Log.warn('INVALID_RPC_CALL', command);
+    return void respond("INVALID_RPC_CALL");
+};
+
+RPC.create = function (Env, cb) {
+    var Sessions = Env.Sessions;
+    var updateLimitDaily = function () {
+        Quota.updateCachedLimits(Env, function (e) {
+            if (e) {
+                Env.WARN('limitUpdate', e);
+            }
+        });
+    };
+    Quota.applyCustomLimits(Env);
+    updateLimitDaily();
+    Env.intervals.dailyLimitUpdate = setInterval(updateLimitDaily, 24*3600*1000);
+
+    //Pinning.loadChannelPins(Env); // XXX
+
+    // expire old sessions once per minute
+    Env.intervals.sessionExpirationInterval = setInterval(function () {
+        Core.expireSessions(Sessions);
+    }, Core.SESSION_EXPIRATION_TIME);
+
+    cb(void 0, function (Server, userId, data, respond) {
+        try {
+            return rpc(Env, Server, userId, data, respond);
+        } catch (e) {
+            console.log("Error from RPC with data " + JSON.stringify(data));
+            console.log(e.stack);
+        }
+    });
+};
diff --git a/lib/schedule.js b/lib/schedule.js
new file mode 100644
index 000000000..1fdef8cce
--- /dev/null
+++ b/lib/schedule.js
@@ -0,0 +1,172 @@
+var WriteQueue = require("./write-queue");
+var Util = require("./common-util");
+
+/*  This module implements a FIFO scheduler
+    which assumes the existence of three types of async tasks:
+
+    1. ordered tasks which must be executed sequentially
+    2. unordered tasks which can be executed in parallel
+    3. blocking tasks which must block the execution of all other tasks
+
+    The scheduler assumes there will be many resources identified by strings,
+    and that the constraints described above will only apply in the context
+    of identical string ids.
+
+    Many blocking tasks may be executed in parallel so long as they
+    concern resources identified by different ids.
+
+USAGE:
+
+    const schedule = require("./schedule")();
+
+    // schedule two sequential tasks using the resource 'pewpew'
+    schedule.ordered('pewpew', function (next) {
+        appendToFile('beep\n', next);
+    });
+    schedule.ordered('pewpew', function (next) {
+        appendToFile('boop\n', next);
+    });
+
+    // schedule a task that can happen whenever
+    schedule.unordered('pewpew', function (next) {
+        displayFileSize(next);
+    });
+
+    // schedule a blocking task which will wait
+    // until all unordered tasks have completed before commencing
+    schedule.blocking('pewpew', function (next) {
+        deleteFile(next);
+    });
+
+    // this will be queued for after the blocking task
+    schedule.ordered('pewpew', function (next) {
+        appendFile('boom', next);
+    });
+
+*/
+
+// return a uid which is not already in a map
+var unusedUid = function (set) {
+    var uid = Util.uid();
+    if (set[uid]) { return unusedUid(set); }
+    return uid;
+};
+
+// return an existing session, creating one if it does not already exist
+var lookup = function (map, id) {
+    return (map[id] = map[id] || {
+        //blocking: [],
+        active: {},
+        blocked: {},
+    });
+};
+
+var isEmpty = function (map) {
+    for (var key in map) {
+        if (map.hasOwnProperty(key)) { return false; }
+    }
+    return true;
+};
+
+module.exports = function () {
+    // every scheduler instance has its own queue
+    var queue = WriteQueue();
+
+    // ordered tasks don't require any extra logic
+    var Ordered = function (id, task) {
+        queue(id, task);
+    };
+
+    // unordered and blocking tasks need a little extra state
+    var map = {};
+
+    // regular garbage collection keeps memory consumption low
+    var collectGarbage = function (id) {
+        // avoid using 'lookup' since it creates a session implicitly
+        var local = map[id];
+        // bail out if no session
+        if (!local) { return; }
+        // bail out if there are blocking or active tasks
+        if (local.lock) { return; }
+        if (!isEmpty(local.active)) { return; }
+        // if there are no pending actions then delete the session
+        delete map[id];
+    };
+
+    // unordered tasks run immediately if there are no blocking tasks scheduled
+    // or immediately after blocking tasks finish
+    var runImmediately = function (local, task) {
+        // set a flag in the map of active unordered tasks
+        // to prevent blocking tasks from running until you finish
+        var uid = unusedUid(local.active);
+        local.active[uid] = true;
+
+        task(function () {
+            // remove the flag you set to indicate that your task completed
+            delete local.active[uid];
+            // don't do anything if other unordered tasks are still running
+            if (!isEmpty(local.active)) { return; }
+            // bail out if there are no blocking tasks scheduled or ready
+            if (typeof(local.waiting) !== 'function') {
return void collectGarbage(); + } + setTimeout(local.waiting); + }); + }; + + var runOnceUnblocked = function (local, task) { + var uid = unusedUid(local.blocked); + local.blocked[uid] = function () { + runImmediately(local, task); + }; + }; + + // 'unordered' tasks are scheduled to run in after the most recently received blocking task + // or immediately and in parallel if there are no blocking tasks scheduled. + var Unordered = function (id, task) { + var local = lookup(map, id); + if (local.lock) { return runOnceUnblocked(local, task); } + runImmediately(local, task); + }; + + var runBlocked = function (local) { + for (var task in local.blocked) { + runImmediately(local, local.blocked[task]); + } + }; + + // 'blocking' tasks must be run alone. + // They are queued alongside ordered tasks, + // and wait until any running 'unordered' tasks complete before commencing. + var Blocking = function (id, task) { + var local = lookup(map, id); + + queue(id, function (next) { + // start right away if there are no running unordered tasks + if (isEmpty(local.active)) { + local.lock = true; + return void task(function () { + delete local.lock; + runBlocked(local); + next(); + }); + } + // otherwise wait until the running tasks have completed + local.waiting = function () { + local.lock = true; + task(function () { + delete local.lock; + delete local.waiting; + runBlocked(local); + next(); + }); + }; + }); + }; + + return { + ordered: Ordered, + unordered: Unordered, + blocking: Blocking, + }; +}; diff --git a/storage/README.md b/lib/storage/README.md similarity index 100% rename from storage/README.md rename to lib/storage/README.md diff --git a/storage/blob.js b/lib/storage/blob.js similarity index 99% rename from storage/blob.js rename to lib/storage/blob.js index c9396d12a..006f5ca80 100644 --- a/storage/blob.js +++ b/lib/storage/blob.js @@ -6,7 +6,7 @@ var Path = require("path"); var BlobStore = module.exports; var nThen = require("nthen"); var Semaphore = require("saferphore"); -var Util = require("../lib/common-util"); +var Util = require("../common-util"); var isValidSafeKey = function (safeKey) { return typeof(safeKey) === 'string' && !/\//.test(safeKey) && safeKey.length === 44; diff --git a/storage/file.js b/lib/storage/file.js similarity index 68% rename from storage/file.js rename to lib/storage/file.js index bb65cff43..bc0830d4e 100644 --- a/storage/file.js +++ b/lib/storage/file.js @@ -6,11 +6,13 @@ var Fse = require("fs-extra"); var Path = require("path"); var nThen = require("nthen"); var Semaphore = require("saferphore"); -var Util = require("../lib/common-util"); -const Readline = require("readline"); -const ToPull = require('stream-to-pull-stream'); -const Pull = require('pull-stream'); +var Util = require("../common-util"); +var Meta = require("../metadata"); +var Extras = require("../hk-util"); +const readFileBin = require("../stream-file").readFileBin; + +const Schedule = require("../schedule"); const isValidChannelId = function (id) { return typeof(id) === 'string' && id.length >= 32 && id.length < 50 && @@ -37,6 +39,10 @@ var mkArchiveMetadataPath = function (env, channelId) { return Path.join(env.archiveRoot, 'datastore', channelId.slice(0, 2), channelId) + '.metadata.ndjson'; }; +var mkTempPath = function (env, channelId) { + return mkPath(env, channelId) + '.temp'; +}; + // pass in the path so we can reuse the same function for archived files var channelExists = function (filepath, cb) { Fs.stat(filepath, function (err, stat) { @@ -52,13 +58,27 @@ var channelExists = 
function (filepath, cb) { }); }; +// readMessagesBin asynchronously iterates over the messages in a channel log +// the handler for each message must call back to read more, which should mean +// that this function has a lower memory profile than our classic method +// of reading logs line by line. +// it also allows the handler to abort reading at any time +const readMessagesBin = (env, id, start, msgHandler, cb) => { + const stream = Fs.createReadStream(mkPath(env, id), { start: start }); + return void readFileBin(stream, msgHandler, function (err) { + try { stream.close(); } catch (err2) { } + cb(err); + }); +}; + // reads classic metadata from a channel log and aborts // returns undefined if the first message was not an object (not an array) var getMetadataAtPath = function (Env, path, _cb) { - var stream; + const stream = Fs.createReadStream(path, { start: 0 }); // cb implicitly destroys the stream, if it exists // and calls back asynchronously no more than once + /* var cb = Util.once(Util.both(function () { try { stream.destroy(); @@ -66,20 +86,26 @@ var getMetadataAtPath = function (Env, path, _cb) { return err; } }, Util.mkAsync(_cb))); + */ - // stream creation emit errors... probably ENOENT - stream = Fs.createReadStream(path, { encoding: 'utf8' }).on('error', cb); - - // stream lines - const rl = Readline.createInterface({ - input: stream, + var cb = Util.once(Util.mkAsync(_cb), function () { + throw new Error("Multiple Callbacks"); }); var i = 0; - rl - .on('line', function (line) { + return readFileBin(stream, function (msgObj, readMore, abort) { + const line = msgObj.buff.toString('utf8'); + + if (!line) { + return readMore(); + } + // metadata should always be on the first line or not exist in the channel at all - if (i++ > 0) { return void cb(); } + if (i++ > 0) { + console.log("aborting"); + abort(); + return void cb(); + } var metadata; try { metadata = JSON.parse(line); @@ -94,9 +120,10 @@ var getMetadataAtPath = function (Env, path, _cb) { // if you can't parse, that's bad return void cb("INVALID_METADATA"); } - }) - .on('close', cb) - .on('error', cb); + readMore(); + }, function (err) { + cb(err); + }); }; var closeChannel = function (env, channelName, cb) { @@ -140,27 +167,16 @@ var clearChannel = function (env, channelId, _cb) { }; /* readMessages is our classic method of reading messages from the disk - notably doesn't provide a means of aborting if you finish early + notably doesn't provide a means of aborting if you finish early. 
+ Internally it uses readFileBin: to avoid duplicating code and to use less memory */ -var readMessages = function (path, msgHandler, cb) { - var remainder = ''; - var stream = Fs.createReadStream(path, { encoding: 'utf8' }); - var complete = function (err) { - var _cb = cb; - cb = undefined; - if (_cb) { _cb(err); } - }; - stream.on('data', function (chunk) { - var lines = chunk.split('\n'); - lines[0] = remainder + lines[0]; - remainder = lines.pop(); - lines.forEach(msgHandler); - }); - stream.on('end', function () { - msgHandler(remainder); - complete(); - }); - stream.on('error', function (e) { complete(e); }); +var readMessages = function (path, msgHandler, _cb) { + var stream = Fs.createReadStream(path, { start: 0}); + var cb = Util.once(Util.mkAsync(_cb)); + return readFileBin(stream, function (msgObj, readMore) { + msgHandler(msgObj.buff.toString('utf8')); + readMore(); + }, cb); }; /* getChannelMetadata @@ -178,22 +194,21 @@ var getChannelMetadata = function (Env, channelId, cb) { // low level method for getting just the dedicated metadata channel var getDedicatedMetadata = function (env, channelId, handler, cb) { var metadataPath = mkMetadataPath(env, channelId); - readMessages(metadataPath, function (line) { - if (!line) { return; } + var stream = Fs.createReadStream(metadataPath, {start: 0}); + readFileBin(stream, function (msgObj, readMore) { + var line = msgObj.buff.toString('utf8'); try { var parsed = JSON.parse(line); handler(null, parsed); - } catch (e) { - handler(e, line); + } catch (err) { + handler(err, line); } + readMore(); }, function (err) { - if (err) { - // ENOENT => there is no metadata log - if (err.code === 'ENOENT') { return void cb(); } - // otherwise stream errors? - return void cb(err); - } - cb(); + // ENOENT => there is no metadata log + if (!err || err.code === 'ENOENT') { return void cb(); } + // otherwise stream errors? + cb(err); }); }; @@ -258,75 +273,6 @@ var writeMetadata = function (env, channelId, data, cb) { }; -// transform a stream of arbitrarily divided data -// into a stream of buffers divided by newlines in the source stream -// TODO see if we could improve performance by using libnewline -const NEWLINE_CHR = ('\n').charCodeAt(0); -const mkBufferSplit = () => { - let remainder = null; - return Pull((read) => { - return (abort, cb) => { - read(abort, function (end, data) { - if (end) { - if (data) { console.log("mkBufferSplit() Data at the end"); } - cb(end, remainder ? [remainder, data] : [data]); - remainder = null; - return; - } - const queue = []; - for (;;) { - const offset = data.indexOf(NEWLINE_CHR); - if (offset < 0) { - remainder = remainder ? 
Buffer.concat([remainder, data]) : data; - break; - } - let subArray = data.slice(0, offset); - if (remainder) { - subArray = Buffer.concat([remainder, subArray]); - remainder = null; - } - queue.push(subArray); - data = data.slice(offset + 1); - } - cb(end, queue); - }); - }; - }, Pull.flatten()); -}; - -// return a streaming function which transforms buffers into objects -// containing the buffer and the offset from the start of the stream -const mkOffsetCounter = () => { - let offset = 0; - return Pull.map((buff) => { - const out = { offset: offset, buff: buff }; - // +1 for the eaten newline - offset += buff.length + 1; - return out; - }); -}; - -// readMessagesBin asynchronously iterates over the messages in a channel log -// the handler for each message must call back to read more, which should mean -// that this function has a lower memory profile than our classic method -// of reading logs line by line. -// it also allows the handler to abort reading at any time -const readMessagesBin = (env, id, start, msgHandler, cb) => { - const stream = Fs.createReadStream(mkPath(env, id), { start: start }); - let keepReading = true; - Pull( - ToPull.read(stream), - mkBufferSplit(), - mkOffsetCounter(), - Pull.asyncMap((data, moreCb) => { - msgHandler(data, moreCb, () => { keepReading = false; moreCb(); }); - }), - Pull.drain(() => (keepReading), (err) => { - cb((keepReading) ? err : undefined); - }) - ); -}; - // check if a file exists at $path var checkPath = function (path, callback) { Fs.stat(path, function (err) { @@ -420,6 +366,7 @@ var removeArchivedChannel = function (env, channelName, cb) { }); }; +// TODO use ../plan.js for a smaller memory footprint var listChannels = function (root, handler, cb) { // do twenty things at a time var sema = Semaphore.create(20); @@ -553,9 +500,6 @@ var listChannels = function (root, handler, cb) { // to an equivalent location in the cold storage directory var archiveChannel = function (env, channelName, cb) { // TODO close channels before archiving them? - if (!env.retainData) { - return void cb("ARCHIVES_DISABLED"); - } // ctime is the most reliable indicator of when a file was archived // because it is used to indicate changes to the files metadata @@ -738,8 +682,9 @@ export type ChainPadServer_ChannelInternal_t = { var getChannel = function ( env, id, - callback /*:(err:?Error, chan:?ChainPadServer_ChannelInternal_t)=>void*/ + _callback /*:(err:?Error, chan:?ChainPadServer_ChannelInternal_t)=>void*/ ) { + var callback = Util.once(Util.mkAsync(_callback)); if (env.channels[id]) { var chan = env.channels[id]; chan.atime = +new Date(); @@ -752,6 +697,8 @@ var getChannel = function ( } if (env.openFiles >= env.openFileLimit) { + // FIXME warn if this is the case? + // alternatively use graceful-fs to handle lots of concurrent reads // if you're running out of open files, asynchronously clean up expired files // do it on a shorter timeframe, though (half of normal) setTimeout(function () { @@ -779,23 +726,20 @@ var getChannel = function ( delete env.channels[id]; } if (!channel.writeStream) { - throw new Error("getChannel() complete called without channel writeStream"); + throw new Error("getChannel() complete called without channel writeStream"); // XXX } whenLoaded.forEach(function (wl) { wl(err, (err) ? 
undefined : channel); }); }; var fileExists; - var errorState; nThen(function (waitFor) { checkPath(path, waitFor(function (err, exists) { if (err) { - errorState = true; - complete(err); - return; + waitFor.abort(); + return void complete(err); } fileExists = exists; })); }).nThen(function (waitFor) { - if (errorState) { return; } var stream = channel.writeStream = Fs.createWriteStream(path, { flags: 'a' }); env.openFiles++; stream.on('open', waitFor()); @@ -811,7 +755,6 @@ var getChannel = function ( } }); }).nThen(function () { - if (errorState) { return; } complete(); }); }; @@ -836,6 +779,7 @@ var message = function (env, chanName, msg, cb) { }; // stream messages from a channel log +// TODO replace getMessages with readFileBin var getMessages = function (env, chanName, handler, cb) { getChannel(env, chanName, function (err, chan) { if (!chan) { @@ -867,40 +811,189 @@ var getMessages = function (env, chanName, handler, cb) { }); }; -/*:: -export type ChainPadServer_MessageObj_t = { buff: Buffer, offset: number }; -export type ChainPadServer_Storage_t = { - readMessagesBin: ( - channelName:string, - start:number, - asyncMsgHandler:(msg:ChainPadServer_MessageObj_t, moreCb:()=>void, abortCb:()=>void)=>void, - cb:(err:?Error)=>void - )=>void, - message: (channelName:string, content:string, cb:(err:?Error)=>void)=>void, - messageBin: (channelName:string, content:Buffer, cb:(err:?Error)=>void)=>void, - getMessages: (channelName:string, msgHandler:(msg:string)=>void, cb:(err:?Error)=>void)=>void, - removeChannel: (channelName:string, cb:(err:?Error)=>void)=>void, - closeChannel: (channelName:string, cb:(err:?Error)=>void)=>void, - flushUnusedChannels: (cb:()=>void)=>void, - getChannelSize: (channelName:string, cb:(err:?Error, size:?number)=>void)=>void, - getChannelMetadata: (channelName:string, cb:(err:?Error|string, data:?any)=>void)=>void, - clearChannel: (channelName:string, (err:?Error)=>void)=>void -}; -export type ChainPadServer_Config_t = { - verbose?: boolean, - filePath?: string, - channelExpirationMs?: number, - openFileLimit?: number +var trimChannel = function (env, channelName, hash, _cb) { + var cb = Util.once(Util.mkAsync(_cb)); + // this function is queued as a blocking action for the relevant channel + + // derive temporary file paths for metadata and log buffers + var tempChannelPath = mkTempPath(env, channelName); + + // derive production db paths + var channelPath = mkPath(env, channelName); + var metadataPath = mkMetadataPath(env, channelName); + + // derive archive paths + var archiveChannelPath = mkArchivePath(env, channelName); + var archiveMetadataPath = mkArchiveMetadataPath(env, channelName); + + var metadataReference = {}; + + var tempStream; + var ABORT; + + var cleanUp = function (cb) { + if (tempStream && !tempStream.closed) { + try { + tempStream.close(); + } catch (err) { } + } + + Fse.unlink(tempChannelPath, function (err) { + // proceed if deleted or if there was nothing to delete + if (!err || err.code === 'ENOENT') { return cb(); } + // else abort and call back with the error + cb(err); + }); + }; + + nThen(function (w) { + // close the file descriptor if it is open + closeChannel(env, channelName, w(function (err) { + if (err) { + w.abort(); + return void cb(err); + } + })); + }).nThen(function (w) { + cleanUp(w(function (err) { + if (err) { + w.abort(); + cb(err); + } + })); + }).nThen(function (w) { + // eat errors since loading the logger here would create a cyclical dependency + var lineHandler = Meta.createLineHandler(metadataReference, Util.noop); 
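For orientation before the next block: `Meta.createLineHandler` (lib/metadata.js) folds each line of a metadata log into the supplied reference object, so that `metadataReference.meta` ends up holding the channel's current metadata state. The first line of the log is the base metadata map; each later line is a `[command, value, timestamp]` update applied via `Meta.handleCommand`, as can be seen in the rpc.js code removed further down in this diff. A simplified, hypothetical sketch of that reduction (not the real implementation, which also validates its input):

```js
// hypothetical sketch of a metadata line handler: fold each
// (err, line) pair produced by the metadata readers into ref.meta
var createLineHandler = function (ref, errorHandler) {
    var index = 0;
    return function (err, line) {
        if (err) { return void errorHandler('METADATA_LINE_ERR', err); }
        if (index++ === 0 && !Array.isArray(line)) {
            // the first line is the full metadata map
            ref.meta = line;
            return;
        }
        try {
            // subsequent lines are [command, value, timestamp] updates
            Meta.handleCommand(ref.meta, line);
        } catch (e) {
            errorHandler('METADATA_COMMAND_ERR', e);
        }
    };
};
```

The `readMetadata` call that follows streams every line of the channel's metadata log through this handler; the accumulated state is later written back out as the first line of the trimmed channel.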
+
+        readMetadata(env, channelName, lineHandler, w(function (err) {
+            if (err) {
+                w.abort();
+                return void cb(err);
+            }
+            // if there were no errors just fall through to the next block
+        }));
+    }).nThen(function (w) {
+        // create temp buffer writeStream
+        tempStream = Fs.createWriteStream(tempChannelPath, {
+            flags: 'a',
+        });
+        tempStream.on('open', w());
+        tempStream.on('error', function (err) {
+            w.abort();
+            ABORT = true;
+            cleanUp(function () {
+                cb(err);
+            });
+        });
+    }).nThen(function (w) {
+        var i = 0;
+        var retain = false;
+
+        var handler = function (msgObj, readMore, abort) {
+            if (ABORT) { return void abort(); } // XXX
+            // the first message might be metadata... ignore it if so
+            if (i++ === 0 && msgObj.buff.indexOf('{') === 0) {
+                return readMore();
+            }
+
+            var s_msg = msgObj.buff.toString('utf8');
+            if (retain) {
+                // if this flag is set then you've already found
+                // the message you were looking for.
+                // write it to your temp buffer and keep going
+                return void tempStream.write(s_msg + '\n', function () {
+                    readMore();
+                });
+            }
+
+            var msg = Util.tryParse(s_msg);
+            if (!msg) { return void readMore(); }
+            var msgHash = Extras.getHash(msg[4]);
+
+            if (msgHash === hash) {
+                // everything from this point on should be retained
+                retain = true;
+                return void tempStream.write(s_msg + '\n', function () {
+                    readMore();
+                });
+            }
+            readMore();
+        };
+
+        readMessagesBin(env, channelName, 0, handler, w(function (err) {
+            if (err) {
+                w.abort();
+                return void cleanUp(function () {
+                    // intentionally call back with main error
+                    // not the cleanup error
+                    cb(err);
+                });
+            }
+
+            if (!retain) {
+                // you never found the message you were looking for
+                // this whole operation is invalid...
+                // clean up, abort, and call back with an error
+
+                w.abort();
+                cleanUp(function () {
+                    // intentionally call back with main error
+                    // not the cleanup error
+                    cb('HASH_NOT_FOUND');
+                });
+            }
+        }));
+    }).nThen(function (w) {
+        // copy existing channel to the archive
+        Fse.copy(channelPath, archiveChannelPath, w(function (err) {
+            if (!err || err.code === 'ENOENT') { return; }
+            w.abort();
+            cleanUp(function () {
+                cb(err);
+            });
+        }));
+
+        // copy existing metadata to the archive
+        Fse.copy(metadataPath, archiveMetadataPath, w(function (err) {
+            if (!err || err.code === 'ENOENT') { return; }
+            w.abort();
+            cleanUp(function () {
+                cb(err);
+            });
+        }));
+    }).nThen(function (w) {
+        // overwrite the existing metadata log with the current metadata state
+        Fs.writeFile(metadataPath, JSON.stringify(metadataReference.meta) + '\n', w(function (err) {
+            // this shouldn't happen, but if it does your channel might be messed up :(
+            if (err) {
+                w.abort();
+                cb(err);
+            }
+        }));
+
+        // overwrite the existing channel with the temp log
+        Fse.move(tempChannelPath, channelPath, {
+            overwrite: true,
+        }, w(function (err) {
+            // this shouldn't happen, but if it does your channel might be messed up :(
+            if (err) {
+                w.abort();
+                cb(err);
+            }
+        }));
+    }).nThen(function () {
+        // clean up and call back with no error
+        // triggering a historyKeeper index cache eviction...
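Condensing the sequence performed by the preceding blocks (before the final cleanup just below): archive the live log and its metadata, persist the reduced metadata state, then atomically swap the trimmed temp log into place. A hypothetical restatement for readability; the real code above runs the two copies in parallel under nThen and routes every failure through cleanUp:

```js
// hypothetical condensation of trimChannel's swap phase;
// `paths` stands in for the variables derived at the top of trimChannel
var swapInTrimmedLog = function (paths, meta, cb) {
    // 1. keep a recoverable copy of the log we're about to overwrite
    Fse.copy(paths.channel, paths.archiveChannel, function (err) {
        if (err && err.code !== 'ENOENT') { return void cb(err); }
        // 2. rewrite the metadata log as a single up-to-date line
        Fs.writeFile(paths.metadata, JSON.stringify(meta) + '\n', function (err) {
            if (err) { return void cb(err); }
            // 3. replace the live log with the trimmed temp buffer
            Fse.move(paths.temp, paths.channel, { overwrite: true }, cb);
        });
    });
};
```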
+ cleanUp(function () { + cb(); + }); + }); }; -*/ -module.exports.create = function ( - conf /*:ChainPadServer_Config_t*/, - cb /*:(store:ChainPadServer_Storage_t)=>void*/ -) { + +module.exports.create = function (conf, cb) { var env = { root: conf.filePath || './datastore', archiveRoot: conf.archivePath || './data/archive', - retainData: conf.retainData, channels: { }, channelExpirationMs: conf.channelExpirationMs || 30000, verbose: conf.verbose, @@ -909,6 +1002,24 @@ module.exports.create = function ( }; var it; + /* our scheduler prioritizes and executes tasks with respect + to all other tasks invoked with an identical key + (typically the id of the concerned channel) + + it assumes that all tasks can be categorized into three types + + 1. unordered tasks such as streaming reads which can take + a long time to complete. + + 2. ordered tasks such as appending to a file which does not + take very long, but where priority is important. + + 3. blocking tasks such as rewriting a file where it would be + dangerous to perform any other task concurrently. + + */ + var schedule = env.schedule = Schedule(); + nThen(function (w) { // make sure the store's directory exists Fse.mkdirp(env.root, PERMISSIVE, w(function (err) { @@ -928,43 +1039,80 @@ module.exports.create = function ( // write a new message to a log message: function (channelName, content, cb) { if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); } - message(env, channelName, content, cb); + schedule.ordered(channelName, function (next) { + message(env, channelName, content, Util.both(cb, next)); + }); }, // iterate over all the messages in a log getMessages: function (channelName, msgHandler, cb) { if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); } - getMessages(env, channelName, msgHandler, cb); + schedule.unordered(channelName, function (next) { + getMessages(env, channelName, msgHandler, Util.both(cb, next)); + }); }, // NEWER IMPLEMENTATIONS OF THE SAME THING // write a new message to a log messageBin: (channelName, content, cb) => { if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); } - messageBin(env, channelName, content, cb); + schedule.ordered(channelName, function (next) { + messageBin(env, channelName, content, Util.both(cb, next)); + }); }, // iterate over the messages in a log readMessagesBin: (channelName, start, asyncMsgHandler, cb) => { if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); } - readMessagesBin(env, channelName, start, asyncMsgHandler, cb); +// FIXME there is a race condition here +// historyKeeper reads the file to find the byte offset of the first interesting message +// then calls this function again to read from that point. +// If this task is in the queue already when the file is read again +// then that byte offset will have been invalidated +// and the resulting stream probably won't align with message boundaries. +// We can evict the cache in the callback but by that point it will be too late. +// Presumably we'll need to bury some of historyKeeper's logic into a filestore method +// in order to make index/read sequences atomic. +// Otherwise, we can add a new task type to the scheduler to take invalidation into account... +// either method introduces significant complexity. 
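Race conditions aside, every method in the factory below follows one wrapper pattern: the storage operation is handed to the scheduler, and its completion must notify both the original caller (`cb`) and the scheduler (`next`), so that neither the client nor the per-channel queue is left hanging. `Util.both` (lib/common-util.js) composes the two; a rough reimplementation for illustration only:

```js
// rough sketch of the callback composition used by every wrapper below
var both = function (f, g) {
    return function () {
        f.apply(null, arguments); // answer the original caller
        g.apply(null, arguments); // release the scheduler's queue
    };
};
```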
+ schedule.unordered(channelName, function (next) { + readMessagesBin(env, channelName, start, asyncMsgHandler, Util.both(cb, next)); + }); }, // METHODS for deleting data // remove a channel and its associated metadata log if present removeChannel: function (channelName, cb) { if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); } - removeChannel(env, channelName, function (err) { - cb(err); +// FIXME there's another race condition here... +// when a remove and an append are scheduled in that order +// the remove will delete the channel's metadata (including its validateKey) +// then the append will recreate the channel and insert a message. +// clients that are connected to the channel via historyKeeper should be kicked out +// however, anyone that connects to that channel in the future will be able to read the +// signed message, but will not find its validate key... +// resulting in a junk/unusable document + schedule.ordered(channelName, function (next) { + removeChannel(env, channelName, Util.both(cb, next)); }); }, // remove a channel and its associated metadata log from the archive directory removeArchivedChannel: function (channelName, cb) { if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); } - removeArchivedChannel(env, channelName, cb); + schedule.ordered(channelName, function (next) { + removeArchivedChannel(env, channelName, Util.both(cb, next)); + }); }, // clear all data for a channel but preserve its metadata clearChannel: function (channelName, cb) { if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); } - clearChannel(env, channelName, cb); + schedule.ordered(channelName, function (next) { + clearChannel(env, channelName, Util.both(cb, next)); + }); + }, + trimChannel: function (channelName, hash, cb) { + if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); } + schedule.blocking(channelName, function (next) { + trimChannel(env, channelName, hash, Util.both(cb, next)); + }); }, // check if a channel exists in the database @@ -972,47 +1120,85 @@ module.exports.create = function ( if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); } // construct the path var filepath = mkPath(env, channelName); - channelExists(filepath, cb); +// (ansuz) I'm uncertain whether this task should be unordered or ordered. +// there's a round trip to the client (and possibly the user) before they decide +// to act on the information of whether there is already content present in this channel. +// so it's practically impossible to avoid race conditions where someone else creates +// some content before you. +// if that's the case, it's basically impossible that you'd generate the same signing key, +// and thus historykeeper should reject the signed messages of whoever loses the race. +// thus 'unordered' seems appropriate. + schedule.unordered(channelName, function (next) { + channelExists(filepath, Util.both(cb, next)); + }); }, // check if a channel exists in the archive isChannelArchived: function (channelName, cb) { if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); } // construct the path var filepath = mkArchivePath(env, channelName); - channelExists(filepath, cb); +// as with the method above, somebody might remove, restore, or overwrite an archive +// in the time that it takes to answer this query and to execute whatever follows. 
+// since it's impossible to win the race every time let's just make this 'unordered'
+            schedule.unordered(channelName, function (next) {
+                channelExists(filepath, Util.both(cb, next));
+            });
         },
 
         // move a channel from the database to the archive, along with its metadata
         archiveChannel: function (channelName, cb) {
             if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
-            archiveChannel(env, channelName, cb);
+// again, the semantics around archiving and appending are really muddy.
+// so I'm calling this 'unordered' again
+            schedule.unordered(channelName, function (next) {
+                archiveChannel(env, channelName, Util.both(cb, next));
+            });
         },
 
         // restore a channel from the archive to the database, along with its metadata
         restoreArchivedChannel: function (channelName, cb) {
             if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
-            unarchiveChannel(env, channelName, cb);
+// archive restoration will fail if either a file or its metadata exists in the live db.
+// so I'm calling this 'ordered' to give writes a chance to flush out.
+// accidental conflicts are extremely unlikely since clients check the status
+// of a previously known channel before joining.
+            schedule.ordered(channelName, function (next) {
+                unarchiveChannel(env, channelName, Util.both(cb, next));
+            });
         },
 
         // METADATA METHODS
         // fetch the metadata for a channel
         getChannelMetadata: function (channelName, cb) {
             if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
-            getChannelMetadata(env, channelName, cb);
+// The only things that can invalidate this method's results are channel archival, removal, or trimming.
+// We want it to be fast, so let's make it unordered.
+            schedule.unordered(channelName, function (next) {
+                getChannelMetadata(env, channelName, Util.both(cb, next));
+            });
         },
 
         // iterate over lines of metadata changes from a dedicated log
         readDedicatedMetadata: function (channelName, handler, cb) {
             if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
-            getDedicatedMetadata(env, channelName, handler, cb);
+// Everything that modifies metadata also updates clients, so this can be 'unordered'
+            schedule.unordered(channelName, function (next) {
+                getDedicatedMetadata(env, channelName, handler, Util.both(cb, next));
+            });
         },
 
         // iterate over multiple lines of metadata changes
         readChannelMetadata: function (channelName, handler, cb) {
             if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
-            readMetadata(env, channelName, handler, cb);
+// same logic as 'readDedicatedMetadata'
+            schedule.unordered(channelName, function (next) {
+                readMetadata(env, channelName, handler, Util.both(cb, next));
+            });
         },
 
         // write a new line to a metadata log
         writeMetadata: function (channelName, data, cb) {
             if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
-            writeMetadata(env, channelName, data, cb);
+// metadata writes are fast and should be applied in order
+            schedule.ordered(channelName, function (next) {
+                writeMetadata(env, channelName, data, Util.both(cb, next));
+            });
         },
 
         // CHANNEL ITERATION
@@ -1025,13 +1211,22 @@ module.exports.create = function (
 
         getChannelSize: function (channelName, cb) {
             if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); }
-            channelBytes(env, channelName, cb);
+// this method should be really fast and it probably doesn't matter much
+// if we get the size slightly before or after somebody writes a few hundred bytes to it.
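Stepping back, the scheduling decisions annotated throughout this factory reduce to a rule of thumb: appends and other writes are `ordered`, pure reads are `unordered`, and whole-file rewrites like trimChannel are `blocking`. A toy illustration of the interleaving this produces, assuming the scheduler from lib/schedule.js above (the getChannelSize wrapper that the comment above belongs to continues just below):

```js
var schedule = require("./lib/schedule")();

schedule.ordered('chan', function (next) {    // A: slow append
    setTimeout(function () { console.log('A'); next(); }, 50);
});
schedule.ordered('chan', function (next) {    // B: runs strictly after A
    console.log('B'); next();
});
schedule.unordered('chan', function (next) {  // C: free to run while A is pending
    console.log('C'); next();
});
schedule.blocking('chan', function (next) {   // D: queued after A and B, and
    console.log('D'); next();                 // waits for C to complete first
});
// likely output under these assumptions: C, A, B, D
```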
+ schedule.ordered(channelName, function (next) { + channelBytes(env, channelName, Util.both(cb, next)); + }); }, // OTHER DATABASE FUNCTIONALITY // remove a particular channel from the cache closeChannel: function (channelName, cb) { if (!isValidChannelId(channelName)) { return void cb(new Error('EINVAL')); } - closeChannel(env, channelName, cb); +// It is most likely the case that the channel is inactive if we are trying to close it, +// thus it doesn't make much difference whether it's ordered or not. +// In any case, it will be re-opened if anyone tries to write to it. + schedule.ordered(channelName, function (next) { + closeChannel(env, channelName, Util.both(cb, next)); + }); }, // iterate over open channels and close any that are not active flushUnusedChannels: function (cb) { @@ -1039,7 +1234,10 @@ module.exports.create = function ( }, // write to a log file log: function (channelName, content, cb) { - message(env, channelName, content, cb); +// you probably want the events in your log to be in the correct order. + schedule.ordered(channelName, function (next) { + message(env, channelName, content, Util.both(cb, next)); + }); }, // shut down the database shutdown: function () { diff --git a/storage/tasks.js b/lib/storage/tasks.js similarity index 96% rename from storage/tasks.js rename to lib/storage/tasks.js index 2209b3d59..3c50f8ce7 100644 --- a/storage/tasks.js +++ b/lib/storage/tasks.js @@ -102,6 +102,7 @@ var list = Tasks.list = function (env, cb, migration) { root: env.root, error: e, }); + w.abort(); return void cb(e); } if (list.length === 0) { @@ -202,22 +203,6 @@ var expire = function (env, task, cb) { var Log = env.log; var args = task.slice(2); - if (!env.retainData) { - Log.info('DELETION_SCHEDULED_EXPIRATION', { - task: task, - }); - env.store.removeChannel(args[0], function (err) { - if (err) { - Log.error('DELETION_SCHEDULED_EXPIRATION_ERROR', { - task: task, - error: err, - }); - } - cb(); - }); - return; - } - Log.info('ARCHIVAL_SCHEDULED_EXPIRATION', { task: task, }); @@ -381,7 +366,6 @@ Tasks.create = function (config, cb) { root: config.taskPath || './tasks', log: config.log, store: config.store, - retainData: Boolean(config.retainData), }; // make sure the path exists... diff --git a/lib/stream-file.js b/lib/stream-file.js new file mode 100644 index 000000000..c3130365b --- /dev/null +++ b/lib/stream-file.js @@ -0,0 +1,84 @@ +/* jshint esversion: 6 */ +/* global Buffer */ + +const ToPull = require('stream-to-pull-stream'); +const Pull = require('pull-stream'); + +const Stream = module.exports; + +// transform a stream of arbitrarily divided data +// into a stream of buffers divided by newlines in the source stream +// TODO see if we could improve performance by using libnewline +const NEWLINE_CHR = ('\n').charCodeAt(0); +const mkBufferSplit = () => { + let remainder = null; + return Pull((read) => { + return (abort, cb) => { + read(abort, function (end, data) { + if (end) { + if (data) { console.log("mkBufferSplit() Data at the end"); } + cb(end, remainder ? [remainder, data] : [data]); + remainder = null; + return; + } + const queue = []; + for (;;) { + const offset = data.indexOf(NEWLINE_CHR); + if (offset < 0) { + remainder = remainder ? 
Buffer.concat([remainder, data]) : data; + break; + } + let subArray = data.slice(0, offset); + if (remainder) { + subArray = Buffer.concat([remainder, subArray]); + remainder = null; + } + queue.push(subArray); + data = data.slice(offset + 1); + } + cb(end, queue); + }); + }; + }, Pull.flatten()); +}; + +// return a streaming function which transforms buffers into objects +// containing the buffer and the offset from the start of the stream +const mkOffsetCounter = () => { + let offset = 0; + return Pull.map((buff) => { + const out = { offset: offset, buff: buff }; + // +1 for the eaten newline + offset += buff.length + 1; + return out; + }); +}; + +// readMessagesBin asynchronously iterates over the messages in a channel log +// the handler for each message must call back to read more, which should mean +// that this function has a lower memory profile than our classic method +// of reading logs line by line. +// it also allows the handler to abort reading at any time +Stream.readFileBin = (stream, msgHandler, cb) => { + //const stream = Fs.createReadStream(path, { start: start }); + let keepReading = true; + Pull( + ToPull.read(stream), + mkBufferSplit(), + mkOffsetCounter(), + Pull.asyncMap((data, moreCb) => { + msgHandler(data, moreCb, () => { + try { + stream.close(); + } catch (err) { + console.error("READ_FILE_BIN_ERR", err); + } + keepReading = false; + moreCb(); + }); + }), + Pull.drain(() => (keepReading), (err) => { + cb((keepReading) ? err : undefined); + }) + ); +}; diff --git a/package-lock.json b/package-lock.json index eb4668a33..935b5650c 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,6 +1,6 @@ { "name": "cryptpad", - "version": "3.10.0", + "version": "3.14.0", "lockfileVersion": 1, "requires": true, "dependencies": { @@ -99,9 +99,9 @@ "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg=" }, "chainpad-crypto": { - "version": "0.2.2", - "resolved": "https://registry.npmjs.org/chainpad-crypto/-/chainpad-crypto-0.2.2.tgz", - "integrity": "sha512-7MJ7qPz/C4sJPsDhPMjdSRmliOCPoRO0XM1vUomcgXA6HINlW+if9AAt/H4q154nYhZ/b57njgC6cWgd/RDidg==", + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/chainpad-crypto/-/chainpad-crypto-0.2.4.tgz", + "integrity": "sha512-fWbVyeAv35vf/dkkQaefASlJcEfpEvfRI23Mtn+/TBBry7+LYNuJMXJiovVY35pfyw2+trKh1Py5Asg9vrmaVg==", "requires": { "tweetnacl": "git://github.com/dchest/tweetnacl-js.git#v0.12.2" }, @@ -113,14 +113,13 @@ } }, "chainpad-server": { - "version": "3.0.5", - "resolved": "https://registry.npmjs.org/chainpad-server/-/chainpad-server-3.0.5.tgz", - "integrity": "sha512-USKOMSHsNjnme81Qy3nQ+ji9eCkBPokYH4T82LVHAI0aayTSCXcTPUDLVGDBCRqe8NsXU4io1WPXn1KiZwB8fA==", + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/chainpad-server/-/chainpad-server-4.0.6.tgz", + "integrity": "sha512-6riUtbSJWD/LMPiBgrWaV3CswI67MqP6zi6q8LrB7X6cMKpp/yjFW2p0apLKBjytqJ4DT27kpiLhaqOZJccN8A==", "requires": { - "nthen": "^0.1.8", + "nthen": "0.1.8", "pull-stream": "^3.6.9", "stream-to-pull-stream": "^1.7.3", - "tweetnacl": "~0.12.2", "ws": "^3.3.1" } }, @@ -161,9 +160,9 @@ "dev": true }, "commander": { - "version": "2.20.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.0.tgz", - "integrity": "sha512-7j2y+40w61zy6YC2iRNpUe/NwhNyoXrYpHMrSunaMG64nRnaf96zO/KMQR4OyN/UnE5KLyEBnKHd4aG3rskjpQ==", + "version": "2.20.3", + "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", "dev": true }, 
"concat-map": { @@ -241,9 +240,9 @@ } }, "dom-serializer": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.1.tgz", - "integrity": "sha512-sK3ujri04WyjwQXVoK4PU3y8ula1stq10GJZpqHIUgoGZdsGzAGu65BnU3d08aTVSvO7mGPZUc0wTEDL+qGE0Q==", + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-0.2.2.tgz", + "integrity": "sha512-2/xPb3ORsQ42nHYiSunXkDjPLBaEj/xTwUO4B7XCZQTRk7EBtTOPaygh10YAAh2OI1Qrp6NWfpAhzswj0ydt9g==", "dev": true, "requires": { "domelementtype": "^2.0.1", @@ -398,15 +397,9 @@ } }, "flatten": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/flatten/-/flatten-1.0.2.tgz", - "integrity": "sha1-2uRqnXj74lKSJYzB54CkHZXAN4I=", - "dev": true - }, - "flow-bin": { - "version": "0.59.0", - "resolved": "https://registry.npmjs.org/flow-bin/-/flow-bin-0.59.0.tgz", - "integrity": "sha512-yJDRffvby5mCTkbwOdXwiGDjeea8Z+BPVuP53/tHqHIZC+KtQD790zopVf7mHk65v+wRn+TZ7tkRSNA9oDmyLg==", + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/flatten/-/flatten-1.0.3.tgz", + "integrity": "sha512-dVsPA/UwQ8+2uoFe5GHtiBMu48dWLTdsuEd7CKGlZlD78r1TTWBvDuFaFGKCo/ZfEr95Uk56vZoX86OsHkUeIg==", "dev": true }, "forwarded": { @@ -450,9 +443,9 @@ } }, "glob": { - "version": "7.1.4", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.4.tgz", - "integrity": "sha512-hkLPepehmnKk41pUGm3sYxoFs/umurYfYJCerbXEyFIWcAzvpipAgVkBqqT9RBKMGjnq6kMuyYwha6csxbiM1A==", + "version": "7.1.6", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", + "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", "dev": true, "requires": { "fs.realpath": "^1.0.0", @@ -478,9 +471,9 @@ } }, "graceful-fs": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.2.tgz", - "integrity": "sha512-IItsdsea19BoLC7ELy13q1iJFNmd7ofZH5+X/pJr90/nRoPEX0DJo1dHDbgtYWOhJhcCgMDTOw84RZ72q6lB+Q==" + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.3.tgz", + "integrity": "sha512-a30VEBm4PEdx1dRB7MFK7BejejvCvBronbLjht+sHuGYj8PHs7M/5Z+rt5lw551vZ7yfTCj4Vuyy3mSJytDWRQ==" }, "has-ansi": { "version": "2.0.0", @@ -593,9 +586,9 @@ "dev": true }, "jshint": { - "version": "2.10.2", - "resolved": "https://registry.npmjs.org/jshint/-/jshint-2.10.2.tgz", - "integrity": "sha512-e7KZgCSXMJxznE/4WULzybCMNXNAd/bf5TSrvVEq78Q/K8ZwFpmBqQeDtNiHc3l49nV4E/+YeHU/JZjSUIrLAA==", + "version": "2.11.0", + "resolved": "https://registry.npmjs.org/jshint/-/jshint-2.11.0.tgz", + "integrity": "sha512-ooaD/hrBPhu35xXW4gn+o3SOuzht73gdBuffgJzrZBJZPGgGiiTvJEgTyxFvBO2nz0+X1G6etF8SzUODTlLY6Q==", "dev": true, "requires": { "cli": "~1.0.0", @@ -635,9 +628,9 @@ "dev": true }, "readable-stream": { - "version": "2.3.6", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz", - "integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==", + "version": "2.3.7", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", + "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", "dev": true, "requires": { "core-util-is": "~1.0.0", @@ -766,16 +759,16 @@ "integrity": "sha512-KI1+qOZu5DcW6wayYHSzR/tXKCDC5Om4s1z2QJjDULzLcmf3DvzS7oluY4HCTrc+9FiKmWUgeNLg7W3uIQvxtQ==" }, "mime-db": { - "version": "1.40.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.40.0.tgz", - "integrity": 
"sha512-jYdeOMPy9vnxEqFRRo6ZvTZ8d9oPb+k18PKoYNYUe2stVEBPPwsln/qWzdbmaIvnhZ9v2P+CuecK+fpUfsV2mA==" + "version": "1.43.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.43.0.tgz", + "integrity": "sha512-+5dsGEEovYbT8UY9yD7eE4XTc4UwJ1jBYlgaQQF38ENsKR3wj/8q8RFZrF9WIZpB2V1ArTVFUva8sAul1NzRzQ==" }, "mime-types": { - "version": "2.1.24", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.24.tgz", - "integrity": "sha512-WaFHS3MCl5fapm3oLxU4eYDw77IQM2ACcxQ9RIxfaC3ooc6PFuBMGZZsYpvoXS5D5QTWPieo1jjLdAm3TBP3cQ==", + "version": "2.1.26", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.26.tgz", + "integrity": "sha512-01paPWYgLrkqAyrlDorC1uDwl2p3qZT7yl806vW7DvDoxwXi46jsjFbg+WdwotBIk6/MbEhO/dh5aZ5sNj/dWQ==", "requires": { - "mime-db": "1.40.0" + "mime-db": "1.43.0" } }, "minimatch": { @@ -848,9 +841,9 @@ "dev": true }, "pako": { - "version": "1.0.10", - "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.10.tgz", - "integrity": "sha512-0DTvPVU3ed8+HNXOu5Bs+o//Mbdj9VNQMUOe9oKCwh8l0GNwpTDMKCWbRjgtD291AWnkAgkqA/LOnQS8AmS1tw==", + "version": "1.0.11", + "resolved": "https://registry.npmjs.org/pako/-/pako-1.0.11.tgz", + "integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==", "dev": true }, "parseurl": { @@ -1251,6 +1244,11 @@ "mime-types": "~2.1.24" } }, + "ulimit": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/ulimit/-/ulimit-0.0.2.tgz", + "integrity": "sha1-K1H53IOBrkECY2zsXrM4wmMFiKA=" + }, "ultron": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/ultron/-/ultron-1.1.1.tgz", @@ -1305,19 +1303,19 @@ } }, "xml2js": { - "version": "0.4.19", - "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.4.19.tgz", - "integrity": "sha512-esZnJZJOiJR9wWKMyuvSE1y6Dq5LCuJanqhxslH2bxM6duahNZ+HMpCLhBQGZkbX6xRf8x1Y2eJlgt2q3qo49Q==", + "version": "0.4.23", + "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.4.23.tgz", + "integrity": "sha512-ySPiMjM0+pLDftHgXY4By0uswI3SPKLDw/i3UXbnO8M/p28zqexCUoPmQFrYD+/1BzhGJSs2i1ERWKJAtiLrug==", "dev": true, "requires": { "sax": ">=0.6.0", - "xmlbuilder": "~9.0.1" + "xmlbuilder": "~11.0.0" } }, "xmlbuilder": { - "version": "9.0.7", - "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-9.0.7.tgz", - "integrity": "sha1-Ey7mPS7FVlxVfiD0wi35rKaGsQ0=", + "version": "11.0.1", + "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-11.0.1.tgz", + "integrity": "sha512-fDlsI/kFEx7gLvbecc0/ohLG50fugQp8ryHzMTuW9vSa1GJ0XYWKnhsUx7oie3G98+r56aTQIUB4kht42R3JvA==", "dev": true } } diff --git a/package.json b/package.json index fa4353662..882baafdb 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "cryptpad", "description": "realtime collaborative visual editor with zero knowlege server", - "version": "3.10.0", + "version": "3.14.0", "license": "AGPL-3.0+", "repository": { "type": "git", @@ -13,7 +13,7 @@ }, "dependencies": { "chainpad-crypto": "^0.2.2", - "chainpad-server": "^3.0.5", + "chainpad-server": "^4.0.6", "express": "~4.16.0", "fs-extra": "^7.0.0", "get-folder-size": "^2.0.1", @@ -24,10 +24,10 @@ "sortify": "^1.0.4", "stream-to-pull-stream": "^1.7.2", "tweetnacl": "~0.12.2", + "ulimit": "0.0.2", "ws": "^3.3.1" }, "devDependencies": { - "flow-bin": "^0.59.0", "jshint": "^2.10.2", "less": "2.7.1", "lesshint": "^4.5.0", @@ -40,8 +40,8 @@ "package": "PACKAGE=1 node server.js", "lint": "jshint --config .jshintrc --exclude-path .jshintignore . 
&& ./node_modules/lesshint/bin/lesshint -c ./.lesshintrc ./customize.dist/src/less2/", "lint:js": "jshint --config .jshintrc --exclude-path .jshintignore .", + "lint:server": "jshint --config .jshintrc lib", "lint:less": "./node_modules/lesshint/bin/lesshint -c ./.lesshintrc ./customize.dist/src/less2/", - "flow": "./node_modules/.bin/flow", "test": "node scripts/TestSelenium.js", "test-rpc": "cd scripts/tests && node test-rpc", "template": "cd customize.dist/src && for page in ../index.html ../privacy.html ../terms.html ../about.html ../contact.html ../what-is-cryptpad.html ../features.html ../../www/login/index.html ../../www/register/index.html ../../www/user/index.html;do echo $page; cp template.html $page; done;", diff --git a/rpc.js b/rpc.js deleted file mode 100644 index fcd85a390..000000000 --- a/rpc.js +++ /dev/null @@ -1,1766 +0,0 @@ -/*@flow*/ -/*jshint esversion: 6 */ -/* Use Nacl for checking signatures of messages */ -var Nacl = require("tweetnacl/nacl-fast"); - -/* globals Buffer*/ -/* globals process */ - -var Fs = require("fs"); - -var Fse = require("fs-extra"); -var Path = require("path"); -var Https = require("https"); -const Package = require('./package.json'); -const Pinned = require('./scripts/pinned'); -const Saferphore = require("saferphore"); -const nThen = require("nthen"); -const getFolderSize = require("get-folder-size"); -const Pins = require("./lib/pins"); -const Meta = require("./lib/metadata"); -const WriteQueue = require("./lib/write-queue"); -const BatchRead = require("./lib/batch-read"); - -const Util = require("./lib/common-util"); -const escapeKeyCharacters = Util.escapeKeyCharacters; -const unescapeKeyCharacters = Util.unescapeKeyCharacters; -const mkEvent = Util.mkEvent; - -var RPC = module.exports; - -var Store = require("./storage/file"); -var BlobStore = require("./storage/blob"); - -var DEFAULT_LIMIT = 50 * 1024 * 1024; -var SESSION_EXPIRATION_TIME = 60 * 1000; - -var Log; - -var WARN = function (e, output) { - if (e && output) { - Log.warn(e, { - output: output, - message: String(e), - stack: new Error(e).stack, - }); - } -}; - -var isValidId = function (chan) { - return chan && chan.length && /^[a-zA-Z0-9=+-]*$/.test(chan) && - [32, 48].indexOf(chan.length) > -1; -}; - -var makeToken = function () { - return Number(Math.floor(Math.random() * Number.MAX_SAFE_INTEGER)) - .toString(16); -}; - -var makeCookie = function (token) { - var time = (+new Date()); - time -= time % 5000; - - return [ - time, - process.pid, - token - ]; -}; - -var parseCookie = function (cookie) { - if (!(cookie && cookie.split)) { return null; } - - var parts = cookie.split('|'); - if (parts.length !== 3) { return null; } - - var c = {}; - c.time = new Date(parts[0]); - c.pid = Number(parts[1]); - c.seq = parts[2]; - return c; -}; - -var getSession = function (Sessions, key) { - var safeKey = escapeKeyCharacters(key); - if (Sessions[safeKey]) { - Sessions[safeKey].atime = +new Date(); - return Sessions[safeKey]; - } - var user = Sessions[safeKey] = {}; - user.atime = +new Date(); - user.tokens = [ - makeToken() - ]; - return user; -}; - -var isTooOld = function (time, now) { - return (now - time) > 300000; -}; - -var expireSession = function (Sessions, key) { - var session = Sessions[key]; - if (!session) { return; } - if (session.blobstage) { - session.blobstage.close(); - } - delete Sessions[key]; -}; - -var expireSessions = function (Sessions) { - var now = +new Date(); - Object.keys(Sessions).forEach(function (key) { - var session = Sessions[key]; - if (session && 
isTooOld(session.atime, now)) { - expireSession(Sessions, key); - } - }); -}; - -var addTokenForKey = function (Sessions, publicKey, token) { - if (!Sessions[publicKey]) { throw new Error('undefined user'); } - - var user = getSession(Sessions, publicKey); - user.tokens.push(token); - user.atime = +new Date(); - if (user.tokens.length > 2) { user.tokens.shift(); } -}; - -var isValidCookie = function (Sessions, publicKey, cookie) { - var parsed = parseCookie(cookie); - if (!parsed) { return false; } - - var now = +new Date(); - - if (!parsed.time) { return false; } - if (isTooOld(parsed.time, now)) { - return false; - } - - // different process. try harder - if (process.pid !== parsed.pid) { - return false; - } - - var user = getSession(Sessions, publicKey); - if (!user) { return false; } - - var idx = user.tokens.indexOf(parsed.seq); - if (idx === -1) { return false; } - - if (idx > 0) { - // make a new token - addTokenForKey(Sessions, publicKey, makeToken()); - } - - return true; -}; - -var checkSignature = function (signedMsg, signature, publicKey) { - if (!(signedMsg && publicKey)) { return false; } - - var signedBuffer; - var pubBuffer; - var signatureBuffer; - - try { - signedBuffer = Nacl.util.decodeUTF8(signedMsg); - } catch (e) { - Log.error('INVALID_SIGNED_BUFFER', signedMsg); - return null; - } - - try { - pubBuffer = Nacl.util.decodeBase64(publicKey); - } catch (e) { - return false; - } - - try { - signatureBuffer = Nacl.util.decodeBase64(signature); - } catch (e) { - return false; - } - - if (pubBuffer.length !== 32) { - Log.error('PUBLIC_KEY_LENGTH', publicKey); - return false; - } - - if (signatureBuffer.length !== 64) { - return false; - } - - return Nacl.sign.detached.verify(signedBuffer, signatureBuffer, pubBuffer); -}; - -const batchUserPins = BatchRead("LOAD_USER_PINS"); -var loadUserPins = function (Env, publicKey, cb) { - var session = getSession(Env.Sessions, publicKey); - - if (session.channels) { - return cb(session.channels); - } - - batchUserPins(publicKey, cb, function (done) { - var ref = {}; - var lineHandler = Pins.createLineHandler(ref, function (label, data) { - Log.error(label, { - log: publicKey, - data: data, - }); - }); - - // if channels aren't in memory. load them from disk - Env.pinStore.getMessages(publicKey, lineHandler, function () { - // no more messages - - // only put this into the cache if it completes - session.channels = ref.pins; - done(ref.pins); // FIXME no error handling? 
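One pattern from the deleted rpc.js deserves a note, since it survives in lib/batch-read.js: `BatchRead` coalesces concurrent reads of the same key, so a burst of identical requests (as in `loadUserPins` above) triggers a single disk scan whose result is fanned out to every waiting caller. A rough sketch of the idea, not the actual implementation:

```js
// rough sketch: callers who ask for a key while a read is already
// in flight simply share the result of that read
var BatchRead = function (/* label, used for logging in the real module */) {
    var pending = {};
    return function (key, cb, compute) {
        if (pending[key]) { return void pending[key].push(cb); }
        pending[key] = [cb];
        compute(function () {
            var args = arguments;
            var callbacks = pending[key];
            delete pending[key];
            callbacks.forEach(function (f) { f.apply(null, args); });
        });
    };
};
```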
- }); - }); -}; - -var truthyKeys = function (O) { - return Object.keys(O).filter(function (k) { - return O[k]; - }); -}; - -var getChannelList = function (Env, publicKey, _cb) { - var cb = Util.once(Util.mkAsync(_cb)); - loadUserPins(Env, publicKey, function (pins) { - cb(truthyKeys(pins)); - }); -}; - -var getFileSize = function (Env, channel, _cb) { - var cb = Util.once(Util.mkAsync(_cb)); - if (!isValidId(channel)) { return void cb('INVALID_CHAN'); } - if (channel.length === 32) { - if (typeof(Env.msgStore.getChannelSize) !== 'function') { - return cb('GET_CHANNEL_SIZE_UNSUPPORTED'); - } - - return void Env.msgStore.getChannelSize(channel, function (e, size /*:number*/) { - if (e) { - if (e.code === 'ENOENT') { return void cb(void 0, 0); } - return void cb(e.code); - } - cb(void 0, size); - }); - } - - // 'channel' refers to a file, so you need another API - Env.blobStore.size(channel, function (e, size) { - if (typeof(size) === 'undefined') { return void cb(e); } - cb(void 0, size); - }); -}; - -const batchMetadata = BatchRead("GET_METADATA"); -var getMetadata = function (Env, channel, cb) { - if (!isValidId(channel)) { return void cb('INVALID_CHAN'); } - if (channel.length !== 32) { return cb("INVALID_CHAN_LENGTH"); } - - batchMetadata(channel, cb, function (done) { - var ref = {}; - var lineHandler = Meta.createLineHandler(ref, Log.error); - - return void Env.msgStore.readChannelMetadata(channel, lineHandler, function (err) { - if (err) { - // stream errors? - return void done(err); - } - done(void 0, ref.meta); - }); - }); -}; - -/* setMetadata - - write a new line to the metadata log if a valid command is provided - - data is an object: { - channel: channelId, - command: metadataCommand (string), - value: value - } -*/ -var queueMetadata = WriteQueue(); -var setMetadata = function (Env, data, unsafeKey, cb) { - var channel = data.channel; - var command = data.command; - if (!channel || !isValidId(channel)) { return void cb ('INVALID_CHAN'); } - if (!command || typeof (command) !== 'string') { return void cb ('INVALID_COMMAND'); } - if (Meta.commands.indexOf(command) === -1) { return void('UNSUPPORTED_COMMAND'); } - - queueMetadata(channel, function (next) { - getMetadata(Env, channel, function (err, metadata) { - if (err) { - cb(err); - return void next(); - } - if (!(metadata && Array.isArray(metadata.owners))) { - cb('E_NO_OWNERS'); - return void next(); - } - - // Confirm that the channel is owned by the user in question - // or the user is accepting a pending ownerhsip offer - if (metadata.pending_owners && Array.isArray(metadata.pending_owners) && - metadata.pending_owners.indexOf(unsafeKey) !== -1 && - metadata.owners.indexOf(unsafeKey) === -1) { - - // If you are a pending owner, make sure you can only add yourelf as an owner - if ((command !== 'ADD_OWNERS' && command !== 'RM_PENDING_OWNERS') - || !Array.isArray(data.value) - || data.value.length !== 1 - || data.value[0] !== unsafeKey) { - cb('INSUFFICIENT_PERMISSIONS'); - return void next(); - } - - } else if (metadata.owners.indexOf(unsafeKey) === -1) { - cb('INSUFFICIENT_PERMISSIONS'); - return void next(); - } - - // Add the new metadata line - var line = [command, data.value, +new Date()]; - var changed = false; - try { - changed = Meta.handleCommand(metadata, line); - } catch (e) { - cb(e); - return void next(); - } - - // if your command is valid but it didn't result in any change to the metadata, - // call back now and don't write any "useless" line to the log - if (!changed) { - cb(void 0, metadata); - return void 
next(); - } - Env.msgStore.writeMetadata(channel, JSON.stringify(line), function (e) { - if (e) { - cb(e); - return void next(); - } - cb(void 0, metadata); - next(); - }); - }); - }); -}; - -var getMultipleFileSize = function (Env, channels, cb) { - if (!Array.isArray(channels)) { return cb('INVALID_PIN_LIST'); } - if (typeof(Env.msgStore.getChannelSize) !== 'function') { - return cb('GET_CHANNEL_SIZE_UNSUPPORTED'); - } - - var i = channels.length; - var counts = {}; - - var done = function () { - i--; - if (i === 0) { return cb(void 0, counts); } - }; - - channels.forEach(function (channel) { - getFileSize(Env, channel, function (e, size) { - if (e) { - // most likely error here is that a file no longer exists - // but a user still has it in their drive, and wants to know - // its size. We should find a way to inform them of this in - // the future. For now we can just tell them it has no size. - - //WARN('getFileSize', e); - counts[channel] = 0; - return done(); - } - counts[channel] = size; - done(); - }); - }); -}; - -/* accepts a list, and returns a sublist of channel or file ids which seem - to have been deleted from the server (file size 0) - - we might consider that we should only say a file is gone if fs.stat returns - ENOENT, but for now it's simplest to just rely on getFileSize... -*/ -var getDeletedPads = function (Env, channels, cb) { - if (!Array.isArray(channels)) { return cb('INVALID_LIST'); } - var L = channels.length; - - var sem = Saferphore.create(10); - var absentees = []; - - var job = function (channel, wait) { - return function (give) { - getFileSize(Env, channel, wait(give(function (e, size) { - if (e) { return; } - if (size === 0) { absentees.push(channel); } - }))); - }; - }; - - nThen(function (w) { - for (var i = 0; i < L; i++) { - sem.take(job(channels[i], w)); - } - }).nThen(function () { - cb(void 0, absentees); - }); -}; - -const batchTotalSize = BatchRead("GET_TOTAL_SIZE"); -var getTotalSize = function (Env, publicKey, cb) { - var unescapedKey = unescapeKeyCharacters(publicKey); - var limit = Env.limits[unescapedKey]; - - // Get a common key if multiple users share the same quota, otherwise take the public key - var batchKey = (limit && Array.isArray(limit.users)) ? limit.users.join('') : publicKey; - - batchTotalSize(batchKey, cb, function (done) { - var channels = []; - var bytes = 0; - nThen(function (waitFor) { - // Get the channels list for our user account - getChannelList(Env, publicKey, waitFor(function (_channels) { - if (!_channels) { - waitFor.abort(); - return done('INVALID_PIN_LIST'); - } - Array.prototype.push.apply(channels, _channels); - })); - // Get the channels list for users sharing our quota - if (limit && Array.isArray(limit.users) && limit.users.length > 1) { - limit.users.forEach(function (key) { - if (key === unescapedKey) { return; } // Don't count ourselves twice - getChannelList(Env, key, waitFor(function (_channels) { - if (!_channels) { return; } // Broken user, don't count their quota - Array.prototype.push.apply(channels, _channels); - })); - }); - } - }).nThen(function (waitFor) { - // Get size of the channels - var list = []; // Contains the channels already counted in the quota to avoid duplicates - channels.forEach(function (channel) { // TODO semaphore? 
- if (list.indexOf(channel) !== -1) { return; } - list.push(channel); - getFileSize(Env, channel, waitFor(function (e, size) { - if (!e) { bytes += size; } - })); - }); - }).nThen(function () { - done(void 0, bytes); - }); - }); -}; - -var hashChannelList = function (A) { - var uniques = []; - - A.forEach(function (a) { - if (uniques.indexOf(a) === -1) { uniques.push(a); } - }); - uniques.sort(); - - var hash = Nacl.util.encodeBase64(Nacl.hash(Nacl - .util.decodeUTF8(JSON.stringify(uniques)))); - - return hash; -}; - -var getHash = function (Env, publicKey, cb) { - getChannelList(Env, publicKey, function (channels) { - cb(void 0, hashChannelList(channels)); - }); -}; - -var applyCustomLimits = function (Env, config) { - var isLimit = function (o) { - var valid = o && typeof(o) === 'object' && - typeof(o.limit) === 'number' && - typeof(o.plan) === 'string' && - typeof(o.note) === 'string'; - return valid; - }; - - // read custom limits from the config - var customLimits = (function (custom) { - var limits = {}; - Object.keys(custom).forEach(function (k) { - k.replace(/\/([^\/]+)$/, function (all, safeKey) { - var id = unescapeKeyCharacters(safeKey || ''); - limits[id] = custom[k]; - return ''; - }); - }); - return limits; - }(config.customLimits || {})); - - Object.keys(customLimits).forEach(function (k) { - if (!isLimit(customLimits[k])) { return; } - Env.limits[k] = customLimits[k]; - }); -}; - -// The limits object contains storage limits for all the publicKey that have paid -// To each key is associated an object containing the 'limit' value and a 'note' explaining that limit -var updateLimits = function (Env, config, publicKey, cb /*:(?string, ?any[])=>void*/) { // FIXME BATCH? - - if (config.adminEmail === false) { - applyCustomLimits(Env, config); - if (config.allowSubscriptions === false) { return; } - throw new Error("allowSubscriptions must be false if adminEmail is false"); - } - if (typeof cb !== "function") { cb = function () {}; } - - var defaultLimit = typeof(config.defaultStorageLimit) === 'number'? - config.defaultStorageLimit: DEFAULT_LIMIT; - - var userId; - if (publicKey) { - userId = unescapeKeyCharacters(publicKey); - } - - var body = JSON.stringify({ - domain: config.myDomain, - subdomain: config.mySubdomain || null, - adminEmail: config.adminEmail, - version: Package.version - }); - var options = { - host: 'accounts.cryptpad.fr', - path: '/api/getauthorized', - method: 'POST', - headers: { - "Content-Type": "application/json", - "Content-Length": Buffer.byteLength(body) - } - }; - - var req = Https.request(options, function (response) { - if (!('' + response.statusCode).match(/^2\d\d$/)) { - return void cb('SERVER ERROR ' + response.statusCode); - } - var str = ''; - - response.on('data', function (chunk) { - str += chunk; - }); - - response.on('end', function () { - try { - var json = JSON.parse(str); - Env.limits = json; - applyCustomLimits(Env, config); - - var l; - if (userId) { - var limit = Env.limits[userId]; - l = limit && typeof limit.limit === "number" ? - [limit.limit, limit.plan, limit.note] : [defaultLimit, '', '']; - } - cb(void 0, l); - } catch (e) { - cb(e); - } - }); - }); - - req.on('error', function (e) { - applyCustomLimits(Env, config); - if (!config.domain) { return cb(); } - cb(e); - }); - - req.end(body); -}; - -var getLimit = function (Env, publicKey, cb) { - var unescapedKey = unescapeKeyCharacters(publicKey); - var limit = Env.limits[unescapedKey]; - var defaultLimit = typeof(Env.defaultStorageLimit) === 'number'? 
- Env.defaultStorageLimit: DEFAULT_LIMIT; - - var toSend = limit && typeof(limit.limit) === "number"? - [limit.limit, limit.plan, limit.note] : [defaultLimit, '', '']; - - cb(void 0, toSend); -}; - -var getFreeSpace = function (Env, publicKey, cb) { - getLimit(Env, publicKey, function (e, limit) { - if (e) { return void cb(e); } - getTotalSize(Env, publicKey, function (e, size) { - if (typeof(size) === 'undefined') { return void cb(e); } - - var rem = limit[0] - size; - if (typeof(rem) !== 'number') { - return void cb('invalid_response'); - } - cb(void 0, rem); - }); - }); -}; - -var sumChannelSizes = function (sizes) { - return Object.keys(sizes).map(function (id) { return sizes[id]; }) - .filter(function (x) { - // only allow positive numbers - return !(typeof(x) !== 'number' || x <= 0); - }) - .reduce(function (a, b) { return a + b; }, 0); -}; - -// inform that the -var loadChannelPins = function (Env) { - Pinned.load(function (err, data) { - if (err) { - Log.error("LOAD_CHANNEL_PINS", err); - - // FIXME not sure what should be done here instead - Env.pinnedPads = {}; - Env.evPinnedPadsReady.fire(); - return; - } - - - Env.pinnedPads = data; - Env.evPinnedPadsReady.fire(); - }, { - pinPath: Env.paths.pin, - }); -}; -var addPinned = function ( - Env, - publicKey /*:string*/, - channelList /*Array*/, - cb /*:()=>void*/) -{ - Env.evPinnedPadsReady.reg(() => { - channelList.forEach((c) => { - const x = Env.pinnedPads[c] = Env.pinnedPads[c] || {}; - x[publicKey] = 1; - }); - cb(); - }); -}; -var removePinned = function ( - Env, - publicKey /*:string*/, - channelList /*Array*/, - cb /*:()=>void*/) -{ - Env.evPinnedPadsReady.reg(() => { - channelList.forEach((c) => { - const x = Env.pinnedPads[c]; - if (!x) { return; } - delete x[publicKey]; - }); - cb(); - }); -}; -var isChannelPinned = function (Env, channel, cb) { - Env.evPinnedPadsReady.reg(() => { - if (Env.pinnedPads[channel] && Object.keys(Env.pinnedPads[channel]).length) { - cb(true); - } else { - delete Env.pinnedPads[channel]; - cb(false); - } - }); -}; - -var pinChannel = function (Env, publicKey, channels, cb) { - if (!channels && channels.filter) { - return void cb('INVALID_PIN_LIST'); - } - - // get channel list ensures your session has a cached channel list - getChannelList(Env, publicKey, function (pinned) { - var session = getSession(Env.Sessions, publicKey); - - // only pin channels which are not already pinned - var toStore = channels.filter(function (channel) { - return pinned.indexOf(channel) === -1; - }); - - if (toStore.length === 0) { - return void getHash(Env, publicKey, cb); - } - - getMultipleFileSize(Env, toStore, function (e, sizes) { - if (typeof(sizes) === 'undefined') { return void cb(e); } - var pinSize = sumChannelSizes(sizes); - - getFreeSpace(Env, publicKey, function (e, free) { - if (typeof(free) === 'undefined') { - WARN('getFreeSpace', e); - return void cb(e); - } - if (pinSize > free) { return void cb('E_OVER_LIMIT'); } - - Env.pinStore.message(publicKey, JSON.stringify(['PIN', toStore, +new Date()]), - function (e) { - if (e) { return void cb(e); } - toStore.forEach(function (channel) { - session.channels[channel] = true; - }); - addPinned(Env, publicKey, toStore, () => {}); - getHash(Env, publicKey, cb); - }); - }); - }); - }); -}; - -var unpinChannel = function (Env, publicKey, channels, cb) { - if (!channels && channels.filter) { - // expected array - return void cb('INVALID_PIN_LIST'); - } - - getChannelList(Env, publicKey, function (pinned) { - var session = getSession(Env.Sessions, publicKey); - - 
// only unpin channels which are pinned - var toStore = channels.filter(function (channel) { - return pinned.indexOf(channel) !== -1; - }); - - if (toStore.length === 0) { - return void getHash(Env, publicKey, cb); - } - - Env.pinStore.message(publicKey, JSON.stringify(['UNPIN', toStore, +new Date()]), - function (e) { - if (e) { return void cb(e); } - toStore.forEach(function (channel) { - delete session.channels[channel]; - }); - removePinned(Env, publicKey, toStore, () => {}); - getHash(Env, publicKey, cb); - }); - }); -}; - -var resetUserPins = function (Env, publicKey, channelList, cb) { - if (!Array.isArray(channelList)) { return void cb('INVALID_PIN_LIST'); } - var session = getSession(Env.Sessions, publicKey); - - if (!channelList.length) { - return void getHash(Env, publicKey, function (e, hash) { - if (e) { return cb(e); } - cb(void 0, hash); - }); - } - - var pins = {}; - getMultipleFileSize(Env, channelList, function (e, sizes) { - if (typeof(sizes) === 'undefined') { return void cb(e); } - var pinSize = sumChannelSizes(sizes); - - - getLimit(Env, publicKey, function (e, limit) { - if (e) { - WARN('[RESET_ERR]', e); - return void cb(e); - } - - /* we want to let people pin, even if they are over their limit, - but they should only be able to do this once. - - This prevents data loss in the case that someone registers, but - does not have enough free space to pin their migrated data. - - They will not be able to pin additional pads until they upgrade - or delete enough files to go back under their limit. */ - if (pinSize > limit[0] && session.hasPinned) { return void(cb('E_OVER_LIMIT')); } - Env.pinStore.message(publicKey, JSON.stringify(['RESET', channelList, +new Date()]), - function (e) { - if (e) { return void cb(e); } - channelList.forEach(function (channel) { - pins[channel] = true; - }); - - var oldChannels; - if (session.channels && typeof(session.channels) === 'object') { - oldChannels = Object.keys(session.channels); - } else { - oldChannels = []; - } - removePinned(Env, publicKey, oldChannels, () => { - addPinned(Env, publicKey, channelList, ()=>{}); - }); - - // update in-memory cache IFF the reset was allowed. 
- session.channels = pins; - getHash(Env, publicKey, function (e, hash) { - cb(e, hash); - }); - }); - }); - }); -}; - -var clearOwnedChannel = function (Env, channelId, unsafeKey, cb) { - if (typeof(channelId) !== 'string' || channelId.length !== 32) { - return cb('INVALID_ARGUMENTS'); - } - - getMetadata(Env, channelId, function (err, metadata) { - if (err) { return void cb(err); } - if (!(metadata && Array.isArray(metadata.owners))) { return void cb('E_NO_OWNERS'); } - // Confirm that the channel is owned by the user in question - if (metadata.owners.indexOf(unsafeKey) === -1) { - return void cb('INSUFFICIENT_PERMISSIONS'); - } - // FIXME COLDSTORAGE - return void Env.msgStore.clearChannel(channelId, function (e) { - cb(e); - }); - }); -}; - -var removeOwnedChannel = function (Env, channelId, unsafeKey, cb) { - if (typeof(channelId) !== 'string' || !isValidId(channelId)) { - return cb('INVALID_ARGUMENTS'); - } - - if (Env.blobStore.isFileId(channelId)) { - var safeKey = escapeKeyCharacters(unsafeKey); - var blobId = channelId; - - return void nThen(function (w) { - // check if you have permissions - Env.blobStore.isOwnedBy(safeKey, blobId, w(function (err, owned) { - if (err || !owned) { - w.abort(); - return void cb("INSUFFICIENT_PERMISSIONS"); - } - })); - }).nThen(function (w) { - // remove the blob - - if (Env.retainData) { - return void Env.blobStore.archive.blob(blobId, w(function (err) { - Log.info('ARCHIVAL_OWNED_FILE_BY_OWNER_RPC', { - safeKey: safeKey, - blobId: blobId, - status: err? String(err): 'SUCCESS', - }); - if (err) { - w.abort(); - return void cb(err); - } - })); - } - Env.blobStore.remove.blob(blobId, w(function (err) { - Log.info('DELETION_OWNED_FILE_BY_OWNER_RPC', { - safeKey: safeKey, - blobId: blobId, - status: err? String(err): 'SUCCESS', - }); - if (err) { - w.abort(); - return void cb(err); - } - })); - }).nThen(function () { - // remove the proof - if (Env.retainData) { - return void Env.blobStore.archive.proof(safeKey, blobId, function (err) { - Log.info("ARCHIVAL_PROOF_REMOVAL_BY_OWNER_RPC", { - safeKey: safeKey, - blobId: blobId, - status: err? String(err): 'SUCCESS', - }); - if (err) { - return void cb("E_PROOF_REMOVAL"); - } - cb(); - }); - } - - Env.blobStore.remove.proof(safeKey, blobId, function (err) { - Log.info("DELETION_PROOF_REMOVAL_BY_OWNER_RPC", { - safeKey: safeKey, - blobId: blobId, - status: err? String(err): 'SUCCESS', - }); - if (err) { - return void cb("E_PROOF_REMOVAL"); - } - cb(); - }); - }); - } - - getMetadata(Env, channelId, function (err, metadata) { - if (err) { return void cb(err); } - if (!(metadata && Array.isArray(metadata.owners))) { return void cb('E_NO_OWNERS'); } - if (metadata.owners.indexOf(unsafeKey) === -1) { - return void cb('INSUFFICIENT_PERMISSIONS'); - } - // if the admin has configured data retention... - // temporarily archive the file instead of removing it - if (Env.retainData) { - return void Env.msgStore.archiveChannel(channelId, function (e) { - Log.info('ARCHIVAL_CHANNEL_BY_OWNER_RPC', { - unsafeKey: unsafeKey, - channelId: channelId, - status: e? String(e): 'SUCCESS', - }); - cb(e); - }); - } - - return void Env.msgStore.removeChannel(channelId, function (e) { - Log.info('DELETION_CHANNEL_BY_OWNER_RPC', { - unsafeKey: unsafeKey, - channelId: channelId, - status: e? 
String(e): 'SUCCESS', - }); - cb(e); - }); - }); -}; - -/* Users should be able to clear their own pin log with an authenticated RPC -*/ -var removePins = function (Env, safeKey, cb) { - if (typeof(Env.pinStore.removeChannel) !== 'function') { - return void cb("E_NOT_IMPLEMENTED"); - } - Env.pinStore.removeChannel(safeKey, function (err) { - Log.info('DELETION_PIN_BY_OWNER_RPC', { - safeKey: safeKey, - status: err? String(err): 'SUCCESS', - }); - - cb(err); - }); -}; - -/* - We assume that the server is secured against MitM attacks - via HTTPS, and that malicious actors do not have code execution - capabilities. If they do, we have much more serious problems. - - The capability to replay a block write or remove results in either - a denial of service for the user whose block was removed, or in the - case of a write, a rollback to an earlier password. - - Since block modification is destructive, this can result in loss - of access to the user's drive. - - So long as the detached signature is never observed by a malicious - party, and the server discards it after proof of knowledge, replays - are not possible. However, this precludes verification of the signature - at a later time. - - Despite this, an integrity check is still possible by the original - author of the block, since we assume that the block will have been - encrypted with xsalsa20-poly1305 which is authenticated. -*/ -var validateLoginBlock = function (Env, publicKey, signature, block, cb) { // FIXME BLOCKS - // convert the public key to a Uint8Array and validate it - if (typeof(publicKey) !== 'string') { return void cb('E_INVALID_KEY'); } - - var u8_public_key; - try { - u8_public_key = Nacl.util.decodeBase64(publicKey); - } catch (e) { - return void cb('E_INVALID_KEY'); - } - - var u8_signature; - try { - u8_signature = Nacl.util.decodeBase64(signature); - } catch (e) { - Log.error('INVALID_BLOCK_SIGNATURE', e); - return void cb('E_INVALID_SIGNATURE'); - } - - // convert the block to a Uint8Array - var u8_block; - try { - u8_block = Nacl.util.decodeBase64(block); - } catch (e) { - return void cb('E_INVALID_BLOCK'); - } - - // take its hash - var hash = Nacl.hash(u8_block); - - // validate the signature against the hash of the content - var verified = Nacl.sign.detached.verify(hash, u8_signature, u8_public_key); - - // existing authentication ensures that users cannot replay old blocks - - // call back with (err) if unsuccessful - if (!verified) { return void cb("E_COULD_NOT_VERIFY"); } - - return void cb(null, u8_block); -}; - -var createLoginBlockPath = function (Env, publicKey) { // FIXME BLOCKS - // prepare publicKey to be used as a file name - var safeKey = escapeKeyCharacters(publicKey); - - // validate safeKey - if (typeof(safeKey) !== 'string') { - return; - } - - // derive the full path - // /home/cryptpad/cryptpad/block/fg/fg32kefksjdgjkewrjksdfksjdfsdfskdjfsfd - return Path.join(Env.paths.block, safeKey.slice(0, 2), safeKey); -}; - -var writeLoginBlock = function (Env, msg, cb) { // FIXME BLOCKS - //console.log(msg); - var publicKey = msg[0]; - var signature = msg[1]; - var block = msg[2]; - - validateLoginBlock(Env, publicKey, signature, block, function (e, validatedBlock) { - if (e) { return void cb(e); } - if (!(validatedBlock instanceof Uint8Array)) { return void cb('E_INVALID_BLOCK'); } - - // derive the filepath - var path = createLoginBlockPath(Env, publicKey); - - // make sure the path is valid - if (typeof(path) !== 'string') { - return void cb('E_INVALID_BLOCK_PATH'); - } - - var parsed = Path.parse(path); 
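[Editor's aside: the check performed by validateLoginBlock above is standard tweetnacl usage: hash the block with SHA-512, then verify an ed25519 detached signature over that hash. A self-contained round-trip, with a throwaway keypair standing in for a real client's login keys.]

```js
var Nacl = require('tweetnacl/nacl-fast');

// throwaway keypair standing in for a client's login keys
var keys = Nacl.sign.keyPair();
var block = new Uint8Array(Buffer.from('encrypted login block bytes'));

// the client signs the SHA-512 hash of the block, not the block itself
var hash = Nacl.hash(block);
var signature = Nacl.sign.detached(hash, keys.secretKey);

// the server recomputes the hash and verifies the detached signature
console.log(Nacl.sign.detached.verify(hash, signature, keys.publicKey)); // true
```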
- if (!parsed || typeof(parsed.dir) !== 'string') { - return void cb("E_INVALID_BLOCK_PATH_2"); - } - - nThen(function (w) { - // make sure the path to the file exists - Fse.mkdirp(parsed.dir, w(function (e) { - if (e) { - w.abort(); - cb(e); - } - })); - }).nThen(function () { - // actually write the block - - // flow is dumb and I need to guard against this which will never happen - /*:: if (typeof(validatedBlock) === 'undefined') { throw new Error('should never happen'); } */ - /*:: if (typeof(path) === 'undefined') { throw new Error('should never happen'); } */ - Fs.writeFile(path, Buffer.from(validatedBlock), { encoding: "binary", }, function (err) { - if (err) { return void cb(err); } - cb(); - }); - }); - }); -}; - -/* - When users write a block, they upload the block, and provide - a signature proving that they deserve to be able to write to - the location determined by the public key. - - When removing a block, there is nothing to upload, but we need - to sign something. Since the signature is considered sensitive - information, we can just sign some constant and use that as proof. - -*/ -var removeLoginBlock = function (Env, msg, cb) { // FIXME BLOCKS - var publicKey = msg[0]; - var signature = msg[1]; - var block = Nacl.util.decodeUTF8('DELETE_BLOCK'); // clients and the server will have to agree on this constant - - validateLoginBlock(Env, publicKey, signature, block, function (e /*::, validatedBlock */) { - if (e) { return void cb(e); } - // derive the filepath - var path = createLoginBlockPath(Env, publicKey); - - // make sure the path is valid - if (typeof(path) !== 'string') { - return void cb('E_INVALID_BLOCK_PATH'); - } - - // FIXME COLDSTORAGE - Fs.unlink(path, function (err) { - Log.info('DELETION_BLOCK_BY_OWNER_RPC', { - publicKey: publicKey, - path: path, - status: err? String(err): 'SUCCESS', - }); - - if (err) { return void cb(err); } - cb(); - }); - }); -}; - -var ARRAY_LINE = /^\[/; - -/* Files can contain metadata but not content - call back with true if the channel log has no content other than metadata - otherwise false -*/ -var isNewChannel = function (Env, channel, cb) { - if (!isValidId(channel)) { return void cb('INVALID_CHAN'); } - if (channel.length !== 32) { return void cb('INVALID_CHAN'); } - - var done = false; - Env.msgStore.getMessages(channel, function (msg) { - if (done) { return; } - try { - if (typeof(msg) === 'string' && ARRAY_LINE.test(msg)) { - done = true; - return void cb(void 0, false); - } - } catch (e) { - WARN('invalid message read from store', e); - } - }, function () { - if (done) { return; } - // no more messages... 
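[Editor's aside: isNewChannel below scans the channel log and treats the channel as "new" if no line looks like a serialized message. Messages are JSON arrays, so they begin with `[`, while metadata lines are JSON objects. The same predicate over an in-memory log, with invented sample lines.]

```js
// Sketch of the "is this channel new?" test over an in-memory log.
var ARRAY_LINE = /^\[/;

var isNewChannelLog = function (lines) {
    // metadata lines are JSON objects ({...}); real messages are JSON arrays ([...])
    return !lines.some(function (msg) {
        return typeof(msg) === 'string' && ARRAY_LINE.test(msg);
    });
};

console.log(isNewChannelLog(['{"owners":[]}']));             // true: metadata only
console.log(isNewChannelLog(['{"owners":[]}', '[0,null]'])); // false: has content
```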
- cb(void 0, true); - }); -}; - -/* writePrivateMessage - allows users to anonymously send a message to the channel - prevents their netflux-id from being stored in history - and from being broadcast to anyone that might currently be in the channel - - Otherwise behaves the same as sending to a channel -*/ -var writePrivateMessage = function (Env, args, nfwssCtx, cb) { - var channelId = args[0]; - var msg = args[1]; - - // don't bother handling empty messages - if (!msg) { return void cb("INVALID_MESSAGE"); } - - // don't support anything except regular channels - if (!isValidId(channelId) || channelId.length !== 32) { - return void cb("INVALID_CHAN"); - } - - // We expect a modern netflux-websocket-server instance - // if this API isn't here everything will fall apart anyway - if (!(nfwssCtx && nfwssCtx.historyKeeper && typeof(nfwssCtx.historyKeeper.onChannelMessage) === 'function')) { - return void cb("NOT_IMPLEMENTED"); - } - - // historyKeeper expects something with an 'id' attribute - // it will fail unless you provide it, but it doesn't need anything else - var channelStruct = { - id: channelId, - }; - - // construct a message to store and broadcast - var fullMessage = [ - 0, // idk - null, // normally the netflux id, null isn't rejected, and it distinguishes messages written in this way - "MSG", // indicate that this is a MSG - channelId, // channel id - msg // the actual message content. Generally a string - ]; - - // store the message and do everything else that is typically done when going through historyKeeper - nfwssCtx.historyKeeper.onChannelMessage(nfwssCtx, channelStruct, fullMessage); - - // call back with the message and the target channel. - // historyKeeper will take care of broadcasting it if anyone is in the channel - cb(void 0, { - channel: channelId, - message: fullMessage - }); -}; - -const batchDiskUsage = BatchRead("GET_DISK_USAGE"); -var getDiskUsage = function (Env, cb) { - batchDiskUsage('', cb, function (done) { - var data = {}; - nThen(function (waitFor) { - getFolderSize('./', waitFor(function(err, info) { - data.total = info; - })); - getFolderSize(Env.paths.pin, waitFor(function(err, info) { - data.pin = info; - })); - getFolderSize(Env.paths.blob, waitFor(function(err, info) { - data.blob = info; - })); - getFolderSize(Env.paths.staging, waitFor(function(err, info) { - data.blobstage = info; - })); - getFolderSize(Env.paths.block, waitFor(function(err, info) { - data.block = info; - })); - getFolderSize(Env.paths.data, waitFor(function(err, info) { - data.datastore = info; - })); - }).nThen(function () { - done(void 0, data); - }); - }); -}; - -const batchRegisteredUsers = BatchRead("GET_REGISTERED_USERS"); -var getRegisteredUsers = function (Env, cb) { - batchRegisteredUsers('', cb, function (done) { - var dir = Env.paths.pin; - var folders; - var users = 0; - nThen(function (waitFor) { - Fs.readdir(dir, waitFor(function (err, list) { - if (err) { - waitFor.abort(); - return void done(err); - } - folders = list; - })); - }).nThen(function (waitFor) { - folders.forEach(function (f) { - var dir = Env.paths.pin + '/' + f; - Fs.readdir(dir, waitFor(function (err, list) { - if (err) { return; } - users += list.length; - })); - }); - }).nThen(function () { - done(void 0, users); - }); - }); -}; -var getActiveSessions = function (Env, ctx, cb) { - var total = ctx.users ? 
Object.keys(ctx.users).length : '?'; - - var ips = []; - Object.keys(ctx.users).forEach(function (u) { - var user = ctx.users[u]; - var socket = user.socket; - var req = socket.upgradeReq; - var conn = req && req.connection; - var ip = (req && req.headers && req.headers['x-forwarded-for']) || (conn && conn.remoteAddress); - if (ip && ips.indexOf(ip) === -1) { - ips.push(ip); - } - }); - - cb (void 0, [total, ips.length]); -}; - -var adminCommand = function (Env, ctx, publicKey, config, data, cb) { - var admins = Env.admins; - if (admins.indexOf(publicKey) === -1) { - return void cb("FORBIDDEN"); - } - // Handle commands here - switch (data[0]) { - case 'ACTIVE_SESSIONS': - return getActiveSessions(Env, ctx, cb); - case 'ACTIVE_PADS': - return cb(void 0, ctx.channels ? Object.keys(ctx.channels).length : '?'); - case 'REGISTERED_USERS': - return getRegisteredUsers(Env, cb); - case 'DISK_USAGE': - return getDiskUsage(Env, cb); - case 'FLUSH_CACHE': - config.flushCache(); - return cb(void 0, true); - default: - return cb('UNHANDLED_ADMIN_COMMAND'); - } -}; - -var isUnauthenticatedCall = function (call) { - return [ - 'GET_FILE_SIZE', - 'GET_METADATA', - 'GET_MULTIPLE_FILE_SIZE', - 'IS_CHANNEL_PINNED', - 'IS_NEW_CHANNEL', - 'GET_HISTORY_OFFSET', - 'GET_DELETED_PADS', - 'WRITE_PRIVATE_MESSAGE', - ].indexOf(call) !== -1; -}; - -var isAuthenticatedCall = function (call) { - return [ - 'COOKIE', - 'RESET', - 'PIN', - 'UNPIN', - 'GET_HASH', - 'GET_TOTAL_SIZE', - 'UPDATE_LIMITS', - 'GET_LIMIT', - 'UPLOAD_STATUS', - 'UPLOAD_COMPLETE', - 'OWNED_UPLOAD_COMPLETE', - 'UPLOAD_CANCEL', - 'EXPIRE_SESSION', - 'CLEAR_OWNED_CHANNEL', - 'REMOVE_OWNED_CHANNEL', - 'REMOVE_PINS', - 'WRITE_LOGIN_BLOCK', - 'REMOVE_LOGIN_BLOCK', - 'ADMIN', - 'SET_METADATA' - ].indexOf(call) !== -1; -}; - -// upload_status -var upload_status = function (Env, safeKey, filesize, _cb) { // FIXME FILES - var cb = Util.once(Util.mkAsync(_cb)); - - // validate that the provided size is actually a positive number - if (typeof(filesize) !== 'number' && - filesize >= 0) { return void cb('E_INVALID_SIZE'); } - - if (filesize >= Env.maxUploadSize) { return cb('TOO_LARGE'); } - - nThen(function (w) { - var abortAndCB = Util.both(w.abort, cb); - Env.blobStore.status(safeKey, w(function (err, inProgress) { - // if there's an error something is weird - if (err) { return void abortAndCB(err); } - - // we cannot upload two things at once - if (inProgress) { return void abortAndCB(void 0, true); } - })); - }).nThen(function () { - // if yuo're here then there are no pending uploads - // check if you have space in your quota to upload something of this size - getFreeSpace(Env, safeKey, function (e, free) { - if (e) { return void cb(e); } - if (filesize >= free) { return cb('NOT_ENOUGH_SPACE'); } - cb(void 0, false); - }); - }); -}; - -/*:: -const flow_Config = require('./config.example.js'); -type Config_t = typeof(flow_Config); -import type { ChainPadServer_Storage_t } from './storage/file.js' -type NetfluxWebsocketSrvContext_t = { - store: ChainPadServer_Storage_t, - getHistoryOffset: ( - ctx: NetfluxWebsocketSrvContext_t, - channelName: string, - lastKnownHash: ?string, - cb: (err: ?Error, offset: ?number)=>void - )=>void -}; -*/ -RPC.create = function ( - config /*:Config_t*/, - cb /*:(?Error, ?Function)=>void*/ -) { - Log = config.log; - - // load pin-store... - Log.silly('LOADING RPC MODULE'); - - var keyOrDefaultString = function (key, def) { - return typeof(config[key]) === 'string'? 
config[key]: def; - }; - - var Env = { - retainData: config.retainData || false, - defaultStorageLimit: config.defaultStorageLimit, - maxUploadSize: config.maxUploadSize || (20 * 1024 * 1024), - Sessions: {}, - paths: {}, - msgStore: config.store, - pinStore: (undefined /*:any*/), - pinnedPads: {}, - evPinnedPadsReady: mkEvent(true), - limits: {}, - admins: [], - }; - - try { - Env.admins = (config.adminKeys || []).map(function (k) { - k = k.replace(/\/+$/, ''); - var s = k.split('/'); - return s[s.length-1]; - }); - } catch (e) { - console.error("Can't parse admin keys. Please update or fix your config.js file!"); - } - - var Sessions = Env.Sessions; - var paths = Env.paths; - var pinPath = paths.pin = keyOrDefaultString('pinPath', './pins'); - paths.block = keyOrDefaultString('blockPath', './block'); - paths.data = keyOrDefaultString('filePath', './datastore'); - paths.staging = keyOrDefaultString('blobStagingPath', './blobstage'); - paths.blob = keyOrDefaultString('blobPath', './blob'); - - var isUnauthenticateMessage = function (msg) { - return msg && msg.length === 2 && isUnauthenticatedCall(msg[0]); - }; - - var handleUnauthenticatedMessage = function (msg, respond, nfwssCtx) { - Log.silly('LOG_RPC', msg[0]); - switch (msg[0]) { - case 'GET_HISTORY_OFFSET': { - if (typeof(msg[1]) !== 'object' || typeof(msg[1].channelName) !== 'string') { - return respond('INVALID_ARG_FORMAT', msg); - } - const msgHash = typeof(msg[1].msgHash) === 'string' ? msg[1].msgHash : undefined; - nfwssCtx.getHistoryOffset(nfwssCtx, msg[1].channelName, msgHash, (e, ret) => { - if (e) { - if (e.code !== 'ENOENT') { - WARN(e.stack, msg); - } - return respond(e.message); - } - respond(e, [null, ret, null]); - }); - break; - } - case 'GET_FILE_SIZE': - return void getFileSize(Env, msg[1], function (e, size) { - WARN(e, msg[1]); - respond(e, [null, size, null]); - }); - case 'GET_METADATA': - return void getMetadata(Env, msg[1], function (e, data) { - WARN(e, msg[1]); - respond(e, [null, data, null]); - }); - case 'GET_MULTIPLE_FILE_SIZE': - return void getMultipleFileSize(Env, msg[1], function (e, dict) { - if (e) { - WARN(e, dict); - return respond(e); - } - respond(e, [null, dict, null]); - }); - case 'GET_DELETED_PADS': - return void getDeletedPads(Env, msg[1], function (e, list) { - if (e) { - WARN(e, msg[1]); - return respond(e); - } - respond(e, [null, list, null]); - }); - case 'IS_CHANNEL_PINNED': - return void isChannelPinned(Env, msg[1], function (isPinned) { - respond(null, [null, isPinned, null]); - }); - case 'IS_NEW_CHANNEL': - return void isNewChannel(Env, msg[1], function (e, isNew) { - respond(e, [null, isNew, null]); - }); - case 'WRITE_PRIVATE_MESSAGE': - return void writePrivateMessage(Env, msg[1], nfwssCtx, function (e, output) { - respond(e, output); - }); - default: - Log.warn("UNSUPPORTED_RPC_CALL", msg); - return respond('UNSUPPORTED_RPC_CALL', msg); - } - }; - - var rpc0 = function (ctx, data, respond) { - if (!Array.isArray(data)) { - Log.debug('INVALID_ARG_FORMET', data); - return void respond('INVALID_ARG_FORMAT'); - } - - if (!data.length) { - return void respond("INSUFFICIENT_ARGS"); - } else if (data.length !== 1) { - Log.debug('UNEXPECTED_ARGUMENTS_LENGTH', data); - } - - var msg = data[0].slice(0); - - if (!Array.isArray(msg)) { - return void respond('INVALID_ARG_FORMAT'); - } - - if (isUnauthenticateMessage(msg)) { - return handleUnauthenticatedMessage(msg, respond, ctx); - } - - var signature = msg.shift(); - var publicKey = msg.shift(); - - // make sure a user object is 
initialized in the cookie jar - if (publicKey) { - getSession(Sessions, publicKey); - } else { - Log.debug("NO_PUBLIC_KEY_PROVIDED", publicKey); - } - - var cookie = msg[0]; - if (!isValidCookie(Sessions, publicKey, cookie)) { - // no cookie is fine if the RPC is to get a cookie - if (msg[1] !== 'COOKIE') { - return void respond('NO_COOKIE'); - } - } - - var serialized = JSON.stringify(msg); - - if (!(serialized && typeof(publicKey) === 'string')) { - return void respond('INVALID_MESSAGE_OR_PUBLIC_KEY'); - } - - if (isAuthenticatedCall(msg[1])) { - if (checkSignature(serialized, signature, publicKey) !== true) { - return void respond("INVALID_SIGNATURE_OR_PUBLIC_KEY"); - } - } else if (msg[1] !== 'UPLOAD') { - Log.warn('INVALID_RPC_CALL', msg[1]); - return void respond("INVALID_RPC_CALL"); - } - - var safeKey = escapeKeyCharacters(publicKey); - /* If you have gotten this far, you have signed the message with the - public key which you provided. - - We can safely modify the state for that key - - OR it's an unauthenticated call, which must not modify the state - for that key in a meaningful way. - */ - - // discard validated cookie from message - msg.shift(); - - var Respond = function (e, msg) { - var session = Sessions[safeKey]; - var token = session? session.tokens.slice(-1)[0]: ''; - var cookie = makeCookie(token).join('|'); - respond(e ? String(e): e, [cookie].concat(typeof(msg) !== 'undefined' ?msg: [])); - }; - - if (typeof(msg) !== 'object' || !msg.length) { - return void Respond('INVALID_MSG'); - } - - var handleMessage = function () { - Log.silly('LOG_RPC', msg[0]); - switch (msg[0]) { - case 'COOKIE': return void Respond(void 0); - case 'RESET': - return resetUserPins(Env, safeKey, msg[1], function (e, hash) { - //WARN(e, hash); - return void Respond(e, hash); - }); - case 'PIN': - return pinChannel(Env, safeKey, msg[1], function (e, hash) { - WARN(e, hash); - Respond(e, hash); - }); - case 'UNPIN': - return unpinChannel(Env, safeKey, msg[1], function (e, hash) { - WARN(e, hash); - Respond(e, hash); - }); - case 'GET_HASH': - return void getHash(Env, safeKey, function (e, hash) { - WARN(e, hash); - Respond(e, hash); - }); - case 'GET_TOTAL_SIZE': // TODO cache this, since it will get called quite a bit - return getTotalSize(Env, safeKey, function (e, size) { - if (e) { - WARN(e, safeKey); - return void Respond(e); - } - Respond(e, size); - }); - case 'GET_FILE_SIZE': - return void getFileSize(Env, msg[1], function (e, size) { - WARN(e, msg[1]); - Respond(e, size); - }); - case 'UPDATE_LIMITS': - return void updateLimits(Env, config, safeKey, function (e, limit) { - if (e) { - WARN(e, limit); - return void Respond(e); - } - Respond(void 0, limit); - }); - case 'GET_LIMIT': - return void getLimit(Env, safeKey, function (e, limit) { - if (e) { - WARN(e, limit); - return void Respond(e); - } - Respond(void 0, limit); - }); - case 'GET_MULTIPLE_FILE_SIZE': - return void getMultipleFileSize(Env, msg[1], function (e, dict) { - if (e) { - WARN(e, dict); - return void Respond(e); - } - Respond(void 0, dict); - }); - case 'EXPIRE_SESSION': - return void setTimeout(function () { - expireSession(Sessions, safeKey); - Respond(void 0, "OK"); - }); - case 'CLEAR_OWNED_CHANNEL': - return void clearOwnedChannel(Env, msg[1], publicKey, function (e, response) { - if (e) { return void Respond(e); } - Respond(void 0, response); - }); - - case 'REMOVE_OWNED_CHANNEL': - return void removeOwnedChannel(Env, msg[1], publicKey, function (e) { - if (e) { return void Respond(e); } - Respond(void 0, "OK"); - 
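[Editor's aside: for orientation, the authenticated frame that rpc0 above unpacks is one flat array: a detached signature, the sender's public key, then [cookie, command, ...args]; the signature is checked against JSON.stringify of that remainder. A hedged client-side sketch; the real client helpers and the exact base64 handling inside checkSignature are not shown in this diff.]

```js
var Nacl = require('tweetnacl/nacl-fast');

// Assemble an authenticated RPC frame (illustrative, not CryptPad's client API).
var frame = function (signKeys, publicKeyB64, cookie, command, args) {
    var msg = [cookie, command].concat(args);
    // the server strips the first two fields, then verifies the signature
    // against JSON.stringify of what remains: [cookie, command, ...args]
    var serialized = JSON.stringify(msg);
    var sig = Nacl.sign.detached(
        new Uint8Array(Buffer.from(serialized)),
        signKeys.secretKey
    );
    return [Buffer.from(sig).toString('base64'), publicKeyB64].concat(msg);
};
```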
}); - case 'REMOVE_PINS': - return void removePins(Env, safeKey, function (e) { - if (e) { return void Respond(e); } - Respond(void 0, "OK"); - }); - case 'UPLOAD': - return void Env.blobStore.upload(safeKey, msg[1], function (e, len) { - WARN(e, len); - Respond(e, len); - }); - case 'UPLOAD_STATUS': - var filesize = msg[1]; - return void upload_status(Env, safeKey, filesize, function (e, yes) { - if (!e && !yes) { - // no pending uploads, set the new size - var user = getSession(Sessions, safeKey); - user.pendingUploadSize = filesize; - user.currentUploadSize = 0; - } - Respond(e, yes); - }); - case 'UPLOAD_COMPLETE': - return void Env.blobStore.complete(safeKey, msg[1], function (e, hash) { - WARN(e, hash); - Respond(e, hash); - }); - case 'OWNED_UPLOAD_COMPLETE': - return void Env.blobStore.completeOwned(safeKey, msg[1], function (e, blobId) { - WARN(e, blobId); - Respond(e, blobId); - }); - case 'UPLOAD_CANCEL': - // msg[1] is fileSize - // if we pass it here, we can start an upload right away without calling - // UPLOAD_STATUS again - return void Env.blobStore.cancel(safeKey, msg[1], function (e) { - WARN(e, 'UPLOAD_CANCEL'); - Respond(e); - }); - case 'WRITE_LOGIN_BLOCK': - return void writeLoginBlock(Env, msg[1], function (e) { - if (e) { - WARN(e, 'WRITE_LOGIN_BLOCK'); - return void Respond(e); - } - Respond(e); - }); - case 'REMOVE_LOGIN_BLOCK': - return void removeLoginBlock(Env, msg[1], function (e) { - if (e) { - WARN(e, 'REMOVE_LOGIN_BLOCK'); - return void Respond(e); - } - Respond(e); - }); - case 'ADMIN': - return void adminCommand(Env, ctx, safeKey, config, msg[1], function (e, result) { - if (e) { - WARN(e, result); - return void Respond(e); - } - Respond(void 0, result); - }); - case 'SET_METADATA': - return void setMetadata(Env, msg[1], publicKey, function (e, data) { - if (e) { - WARN(e, data); - return void Respond(e); - } - Respond(void 0, data); - }); - default: - return void Respond('UNSUPPORTED_RPC_CALL', msg); - } - }; - - handleMessage(true); - }; - - var rpc = function ( - ctx /*:NetfluxWebsocketSrvContext_t*/, - data /*:Array>*/, - respond /*:(?string, ?Array)=>void*/) - { - try { - return rpc0(ctx, data, respond); - } catch (e) { - console.log("Error from RPC with data " + JSON.stringify(data)); - console.log(e.stack); - } - }; - - var updateLimitDaily = function () { - updateLimits(Env, config, undefined, function (e) { - if (e) { - WARN('limitUpdate', e); - } - }); - }; - updateLimitDaily(); - setInterval(updateLimitDaily, 24*3600*1000); - - loadChannelPins(Env); - - nThen(function (w) { - Store.create({ - filePath: pinPath, - }, w(function (s) { - Env.pinStore = s; - })); - BlobStore.create({ - blobPath: config.blobPath, - blobStagingPath: config.blobStagingPath, - archivePath: config.archivePath, - getSession: function (safeKey) { - return getSession(Sessions, safeKey); - }, - }, w(function (err, blob) { - if (err) { throw new Error(err); } - Env.blobStore = blob; - })); - }).nThen(function () { - cb(void 0, rpc); - // expire old sessions once per minute - setInterval(function () { - expireSessions(Sessions); - }, SESSION_EXPIRATION_TIME); - }); -}; diff --git a/scripts/check-account-deletion.js b/scripts/check-account-deletion.js index 0532e69ed..91020bde9 100644 --- a/scripts/check-account-deletion.js +++ b/scripts/check-account-deletion.js @@ -1,7 +1,6 @@ /* jshint esversion: 6, node: true */ const Fs = require('fs'); const nThen = require('nthen'); -const Pinned = require('./pinned'); const Nacl = require('tweetnacl/nacl-fast'); const Path = 
require('path'); const Pins = require('../lib/pins'); @@ -41,7 +40,7 @@ nThen((waitFor) => { pinned = Pins.calculateFromLog(content.toString('utf8'), f); })); }).nThen((waitFor) => { - Pinned.load(waitFor((err, d) => { + Pins.list(waitFor((err, d) => { data = Object.keys(d); }), { exclude: [edPublic + '.ndjson'] diff --git a/scripts/diagnose-archive-conflicts.js b/scripts/diagnose-archive-conflicts.js index 8617150fc..0e75f4abe 100644 --- a/scripts/diagnose-archive-conflicts.js +++ b/scripts/diagnose-archive-conflicts.js @@ -1,6 +1,6 @@ var nThen = require("nthen"); -var Store = require("../storage/file"); +var Store = require("../lib/storage/file"); var config = require("../lib/load-config"); var store; diff --git a/scripts/evict-inactive.js b/scripts/evict-inactive.js index 13028b8ff..18730fd1e 100644 --- a/scripts/evict-inactive.js +++ b/scripts/evict-inactive.js @@ -1,8 +1,8 @@ var nThen = require("nthen"); -var Store = require("../storage/file"); -var BlobStore = require("../storage/blob"); -var Pinned = require("./pinned"); +var Store = require("../lib/storage/file"); +var BlobStore = require("../lib/storage/blob"); +var Pins = require("../lib/pins"); var config = require("../lib/load-config"); // the administrator should have set an 'inactiveTime' in their config @@ -15,8 +15,6 @@ var inactiveTime = +new Date() - (config.inactiveTime * 24 * 3600 * 1000); // files which were archived before this date can be considered safe to remove var retentionTime = +new Date() - (config.archiveRetentionTime * 24 * 3600 * 1000); -var retainData = Boolean(config.retainData); - var getNewestTime = function (stats) { return stats[['atime', 'ctime', 'mtime'].reduce(function (a, b) { return stats[b] > stats[a]? b: a; @@ -40,7 +38,7 @@ nThen(function (w) { store = _; })); // load the list of pinned files so you know which files // should not be archived or deleted - Pinned.load(w(function (err, _) { + Pins.list(w(function (err, _) { if (err) { w.abort(); return void console.error(err); @@ -176,23 +174,6 @@ nThen(function (w) { if (pins[item.blobId]) { return void next(); } if (item && getNewestTime(item) > retentionTime) { return void next(); } - if (!retainData) { - return void blobs.remove.blob(item.blobId, function (err) { - if (err) { - Log.error("EVICT_BLOB_ERROR", { - error: err, - item: item, - }); - return void next(); - } - Log.info("EVICT_BLOB_INACTIVE", { - item: item, - }); - removed++; - next(); - }); - } - blobs.archive.blob(item.blobId, function (err) { if (err) { Log.error("EVICT_ARCHIVE_BLOB_ERROR", { @@ -247,7 +228,6 @@ nThen(function (w) { Log.info("EVICT_BLOB_PROOFS_REMOVED", removed); })); }).nThen(function (w) { - var removed = 0; var channels = 0; var archived = 0; @@ -279,42 +259,22 @@ nThen(function (w) { // ignore the channel if it's pinned if (pins[item.channel]) { return void cb(); } - // if the server is configured to retain data, archive the channel - if (config.retainData) { - return void store.archiveChannel(item.channel, w(function (err) { - if (err) { - Log.error('EVICT_CHANNEL_ARCHIVAL_ERROR', { - error: err, - channel: item.channel, - }); - return void cb(); - } - Log.info('EVICT_CHANNEL_ARCHIVAL', item.channel); - archived++; - cb(); - })); - } - - // otherwise remove it - store.removeChannel(item.channel, w(function (err) { + return void store.archiveChannel(item.channel, w(function (err) { if (err) { - Log.error('EVICT_CHANNEL_REMOVAL_ERROR', { + Log.error('EVICT_CHANNEL_ARCHIVAL_ERROR', { error: err, channel: item.channel, }); return void cb(); } - 
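[Editor's aside: the eviction script's getNewestTime, visible in the hunk context above, treats a file as active if any of atime/ctime/mtime is recent. A worked example with invented timestamps.]

```js
// The reduce picks whichever timestamp key holds the newest value.
var getNewestTime = function (stats) {
    return stats[['atime', 'ctime', 'mtime'].reduce(function (a, b) {
        return stats[b] > stats[a] ? b : a;
    })];
};

var stats = {
    atime: new Date('2020-01-01'),
    ctime: new Date('2020-02-01'),
    mtime: new Date('2020-03-01'),
};
console.log(getNewestTime(stats)); // 2020-03-01, the mtime
```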
Log.info('EVICT_CHANNEL_REMOVAL', item.channel); - removed++; + Log.info('EVICT_CHANNEL_ARCHIVAL', item.channel); + archived++; cb(); })); }; var done = function () { - if (config.retainData) { - return void Log.info('EVICT_CHANNELS_ARCHIVED', archived); - } - return void Log.info('EVICT_CHANNELS_REMOVED', removed); + return void Log.info('EVICT_CHANNELS_ARCHIVED', archived); }; store.listChannels(handler, w(done)); diff --git a/scripts/expire-channels.js b/scripts/expire-channels.js index 2479fc193..9151398d2 100644 --- a/scripts/expire-channels.js +++ b/scripts/expire-channels.js @@ -1,9 +1,9 @@ var nThen = require("nthen"); -var Tasks = require("../storage/tasks"); +var Tasks = require("../lib/storage/tasks"); var Logger = require("../lib/log"); var config = require("../lib/load-config"); -var FileStorage = require('../' + config.storage || './storage/file'); +var FileStorage = require('../lib/storage/file'); nThen(function (w) { Logger.create(config, w(function (_log) { diff --git a/scripts/migrations/migrate-tasks-v1.js b/scripts/migrations/migrate-tasks-v1.js index 365faeb4c..40b8d7a87 100644 --- a/scripts/migrations/migrate-tasks-v1.js +++ b/scripts/migrations/migrate-tasks-v1.js @@ -1,5 +1,5 @@ var nThen = require("nthen"); -var Tasks = require("../../storage/tasks"); +var Tasks = require("../../lib/storage/tasks"); var Logger = require("../../lib/log"); var config = require("../../lib/load-config"); @@ -7,7 +7,7 @@ var config = require("../../lib/load-config"); // this isn't strictly necessary for what we want to do // but the API requires it, and I don't feel like changing that // --ansuz -var FileStorage = require("../../" + (config.storage || "./storage/file")); +var FileStorage = require("../../lib/storage/file"); var tasks; nThen(function (w) { diff --git a/scripts/restore-archived.js b/scripts/restore-archived.js index a420e35e5..3f68b607e 100644 --- a/scripts/restore-archived.js +++ b/scripts/restore-archived.js @@ -1,6 +1,6 @@ var nThen = require("nthen"); -var Store = require("../storage/file"); +var Store = require("../lib/storage/file"); var config = require("../lib/load-config"); var store; diff --git a/scripts/tests/test-pins.js b/scripts/tests/test-pins.js new file mode 100644 index 000000000..712fe621b --- /dev/null +++ b/scripts/tests/test-pins.js @@ -0,0 +1,46 @@ +/*jshint esversion: 6 */ +const Pins = require("../../lib/pins"); + +var stats = { + users: 0, + lines: 0, // how many lines did you iterate over + surplus: 0, // how many of those lines were not needed? + pinned: 0, // how many files are pinned? 
+ duplicated: 0, +}; + +var handler = function (ref, id /* safeKey */, pinned) { + if (ref.surplus) { + //console.log("%s has %s trimmable lines", id, ref.surplus); + stats.surplus += ref.surplus; + } + + for (var item in ref.pins) { + if (!pinned.hasOwnProperty(item)) { + //console.log("> %s is pinned", item); + stats.pinned++; + } else { + //console.log("> %s was already pinned", item); + stats.duplicated++; + } + } + + stats.users++; + stats.lines += ref.index; + //console.log(ref, id); +}; + +Pins.list(function (err) { + if (err) { return void console.error(err); } +/* + for (var id in pinned) { + console.log(id); + stats.pinned++; + } +*/ + console.log(stats); +}, { + pinPath: require("../../lib/load-config").pinPath, + handler: handler, +}); + diff --git a/scripts/tests/test-plan.js b/scripts/tests/test-plan.js new file mode 100644 index 000000000..e8624514a --- /dev/null +++ b/scripts/tests/test-plan.js @@ -0,0 +1,41 @@ +/*jshint esversion: 6 */ +const Plan = require("../../lib/plan"); + +var rand_delay = function (f) { + setTimeout(f, Math.floor(Math.random() * 1500) + 250); +}; + +var plan = Plan(6).job(1, function (next) { + [1,2,3,4,5,6,7,8,9,10,11,12].forEach(function (n) { + plan.job(0, function (next) { + rand_delay(function () { + console.log("finishing job %s", n); + next(); + }); + }); + }); + console.log("finishing job 0"); + next(); +}).job(2, function (next) { + console.log("finishing job 13"); + + [ + 100, + 200, + 300, + 400 + ].forEach(function (n) { + plan.job(3, function (next) { + rand_delay(function () { + console.log("finishing job %s", n); + next(); + }); + }); + }); + + next(); +}).done(function () { console.log("DONE"); }).start(); + +//console.log(plan); + +//plan.start(); diff --git a/scripts/tests/test-rpc.js b/scripts/tests/test-rpc.js index 70ea13053..ce432408c 100644 --- a/scripts/tests/test-rpc.js +++ b/scripts/tests/test-rpc.js @@ -159,6 +159,13 @@ var createUser = function (config, cb) { } wc.leave(); })); + }).nThen(function (w) { + // FIXME give the server time to write your mailbox data before checking that it's correct + // chainpad-server sends an ACK before the channel has actually been created + // causing you to think that everything is good. + // without this timeout the GET_METADATA rpc occasionally returns before + // the metadata has actually been written to the disk. + setTimeout(w(), 500); }).nThen(function (w) { // confirm that you own your mailbox user.anonRpc.send("GET_METADATA", user.mailboxChannel, w(function (err, data) { @@ -227,6 +234,18 @@ var createUser = function (config, cb) { return void cb(err); } })); + }).nThen(function (w) { + // some basic sanity checks... 
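[Editor's aside: condensed from test-plan.js above. lib/plan itself is not shown in this diff, so the semantics noted here are inferred from the test: Plan(n) creates a plan, .job(stage, fn) queues work (and jobs may enqueue further jobs while the plan runs), and .done(cb).start() runs it.]

```js
const Plan = require("../../lib/plan");

var plan = Plan(6).job(1, function (next) {
    // dynamically queue a follow-up job from inside a running job
    plan.job(0, function (next) {
        setTimeout(function () {
            console.log("inner job finished");
            next();
        }, 250);
    });
    next();
}).done(function () {
    console.log("DONE");
}).start();
```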
+ user.rpc.getServerHash(w(function (err, hash) { + if (err) { + w.abort(); + return void cb(err); + } + if (hash !== EMPTY_ARRAY_HASH) { + console.error("EXPECTED EMPTY ARRAY HASH"); + process.exit(1); + } + })); }).nThen(function () { user.cleanup = function (cb) { @@ -338,9 +357,154 @@ nThen(function (w) { bob.name = 'bob'; //console.log("Initialized Bob"); })); +}).nThen(function (w) { + // restrict access to oscar's mailbox channel + oscar.rpc.send('SET_METADATA', { + command: 'RESTRICT_ACCESS', + channel: oscar.mailboxChannel, + value: [ true ] + }, w(function (err, response) { + if (err) { + return void console.log(err); + } + var metadata = response[0]; + if (!(metadata && metadata.restricted)) { + throw new Error("EXPECTED MAILBOX TO BE RESTRICTED"); + } + })); +}).nThen(function (w) { + alice.anonRpc.send('GET_METADATA', oscar.mailboxChannel, w(function (err, response) { + if (!response) { throw new Error("EXPECTED RESPONSE"); } + var metadata = response[0]; + var expected_fields = ['restricted', 'allowed', 'rejected']; + for (var key in metadata) { + if (expected_fields.indexOf(key) === -1) { + console.log(metadata); + throw new Error("EXPECTED METADATA TO BE RESTRICTED"); + } + } + })); +}).nThen(function (w) { + alice.anonRpc.send('WRITE_PRIVATE_MESSAGE', [ + oscar.mailboxChannel, + '["VANDALISM"]', + ], w(function (err) { + if (err !== 'INSUFFICIENT_PERMISSIONS') { + throw new Error("EXPECTED INSUFFICIENT PERMISSIONS ERROR"); + } + })); +}).nThen(function (w) { + // add alice to oscar's mailbox's allow list for some reason + oscar.rpc.send('SET_METADATA', { + command: 'ADD_ALLOWED', + channel: oscar.mailboxChannel, + value: [ + alice.edKeys.edPublic + ] + }, w(function (err, response) { + var metadata = response && response[0]; + if (!metadata || !Array.isArray(metadata.allowed) || + metadata.allowed.indexOf(alice.edKeys.edPublic) === -1) { + throw new Error("EXPECTED ALICE TO BE IN THE ALLOW LIST"); + } + })); +}).nThen(function (w) { + oscar.anonRpc.send('GET_METADATA', oscar.mailboxChannel, w(function (err, response) { + if (err) { + throw new Error("OSCAR SHOULD BE ABLE TO READ HIS OWN METADATA"); + } + var metadata = response && response[0]; + + if (!metadata) { + throw new Error("EXPECTED METADATA"); + } + + if (metadata.allowed[0] !== alice.edKeys.edPublic) { + throw new Error("EXPECTED ALICE TO BE ON ALLOW LIST"); + } + })); }).nThen(function () { - //setTimeout(w(), 500); + alice.anonRpc.send('GET_METADATA', oscar.mailboxChannel, function (err, response) { + var metadata = response && response[0]; + if (!metadata || !metadata.restricted || !metadata.channel) { + throw new Error("EXPECTED FULL ACCESS TO CHANNEL METADATA"); + } + }); +}).nThen(function (w) { + //throw new Error("boop"); + // add alice as an owner of oscar's mailbox for some reason + oscar.rpc.send('SET_METADATA', { + command: 'ADD_OWNERS', + channel: oscar.mailboxChannel, + value: [ + alice.edKeys.edPublic + ] + }, Util.mkTimeout(w(function (err) { + if (err === 'TIMEOUT') { + throw new Error(err); + } + if (err) { + throw new Error("ADD_OWNERS_FAILURE"); + } + }), 2000)); +}).nThen(function (w) { + // alice should now be able to read oscar's mailbox metadata + alice.anonRpc.send('GET_METADATA', oscar.mailboxChannel, w(function (err, response) { + if (err) { + throw new Error("EXPECTED ALICE TO BE ALLOWED TO READ OSCAR'S METADATA"); + } + var metadata = response && response[0]; + if (!metadata) { throw new Error("EXPECTED METADATA"); } + if (metadata.allowed.length !== 0) { + throw new Error("EXPECTED 
AN EMPTY ALLOW LIST");
+        }
+    }));
+}).nThen(function (w) {
+    // disable the access restriction
+    oscar.rpc.send('SET_METADATA', {
+        command: 'RESTRICT_ACCESS',
+        channel: oscar.mailboxChannel,
+        value: [
+            false
+        ]
+    }, w(function (err) {
+        if (err) {
+            throw new Error("COULD_NOT_DISABLE_RESTRICTED_ACCESS");
+        }
+    }));
+    // add bob to oscar's mailbox's allow list for some reason
+    oscar.rpc.send('SET_METADATA', {
+        command: 'ADD_ALLOWED',
+        channel: oscar.mailboxChannel,
+        value: [
+            bob.edKeys.edPublic
+        ]
+    }, w(function (err) {
+        if (err) {
+            return void console.error(err);
+        }
+    }));
+}).nThen(function (w) {
+    oscar.anonRpc.send('GET_METADATA', oscar.mailboxChannel, w(function (err, response) {
+        if (err) {
+            throw new Error("OSCAR SHOULD BE ABLE TO READ HIS OWN METADATA");
+        }
+        var metadata = response && response[0];
+
+        if (!metadata) {
+            throw new Error("EXPECTED METADATA");
+        }
+
+        if (metadata.allowed[0] !== bob.edKeys.edPublic) {
+            throw new Error("EXPECTED BOB TO BE ON ALLOW LIST");
+        }
+        if (metadata.restricted) {
+            throw new Error("RESTRICTED_ACCESS_NOT_DISABLED");
+        }
+    }));
+}).nThen(function () {
+    //setTimeout(w(), 500);
 }).nThen(function (w) {
     // Alice loads the roster...
     var rosterKeys = Crypto.Team.deriveMemberKeys(sharedConfig.rosterSeed, alice.curveKeys);
@@ -491,7 +655,7 @@ nThen(function (w) {
         console.error("checkpoint by member failed as expected");
     }));
 }).nThen(function (w) {
-    console.log("STATE =", JSON.stringify(oscar.roster.getState(), null, 2));
+    //console.log("STATE =", JSON.stringify(oscar.roster.getState(), null, 2));
 
     // oscar describes the team
     oscar.roster.metadata({
@@ -499,7 +663,7 @@ topic: "pewpewpew",
     }, w(function (err) {
         if (err) { return void console.log(err); }
-        console.log("STATE =", JSON.stringify(oscar.roster.getState(), null, 2));
+        //console.log("STATE =", JSON.stringify(oscar.roster.getState(), null, 2));
     }));
 }).nThen(function (w) {
     // oscar sends a checkpoint
@@ -554,6 +718,7 @@
     }));
 }).nThen(function (w) {
     oscar.roster.checkpoint(w(function (err) {
+        oscar.lastRosterCheckpointHash = oscar.roster.getLastCheckpointHash(); // FIXME bob should connect to this to avoid extra messages
         if (!err) { return; }
         console.error("Checkpoint by an owner failed unexpectedly");
         console.error(err);
@@ -578,21 +743,21 @@
         channel: rosterKeys.channel,
         keys: rosterKeys,
         anon_rpc: bob.anonRpc,
-        lastKnownHash: oscar.lastKnownHash,
+        //lastKnownHash: oscar.lastRosterCheckpointHash
+        //lastKnownHash: oscar.lastKnownHash, // FIXME this doesn't work. off-by-one?
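[Editor's aside: to summarize the access-list protocol these tests exercise, SET_METADATA takes a small command object. The payload shapes used above, with placeholder values; the full command set lives server-side and is not shown in this diff.]

```js
// Restrict a channel so that only allowed keys (and owners) may read it.
var restrict = {
    command: 'RESTRICT_ACCESS',
    channel: '<32-character channel id>',
    value: [ true ],                    // [ false ] lifts the restriction
};
// Grant a key read access while the channel is restricted.
var allow = {
    command: 'ADD_ALLOWED',
    channel: '<32-character channel id>',
    value: [ '<base64 ed25519 public key>' ],
};
// rpc.send('SET_METADATA', restrict, function (err, response) { /* ... */ });
```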
}, w(function (err, roster) {
         if (err) {
             w.abort();
             return void console.trace(err);
         }
-
         bob.roster = roster;
 
         if (JSON.stringify(bob.roster.getState()) !== JSON.stringify(oscar.roster.getState())) {
-            console.log("BOB AND OSCAR DO NOT HAVE THE SAME STATE");
+            //console.log("BOB AND OSCAR DO NOT HAVE THE SAME STATE");
             console.log("BOB =", JSON.stringify(bob.roster.getState(), null, 2));
             console.log("OSCAR =", JSON.stringify(oscar.roster.getState(), null, 2));
+            throw new Error("BOB AND OSCAR DO NOT HAVE THE SAME STATE");
         }
-
         bob.destroy.reg(function () {
             roster.stop();
         });
@@ -639,8 +804,8 @@
 
     bob.roster.describe(data, w(function (err) {
         if (err) {
-            console.error("self-description by a member failed unexpectedly");
-            process.exit(1);
+            console.error(err);
+            throw new Error("self-description by a member failed unexpectedly");
         }
     }));
 }).nThen(function (w) {
diff --git a/scripts/tests/test-scheduler.js b/scripts/tests/test-scheduler.js
new file mode 100644
index 000000000..6a076d5aa
--- /dev/null
+++ b/scripts/tests/test-scheduler.js
@@ -0,0 +1,220 @@
+/* three types of actions:
+    * read
+    * write
+    * append
+  each of which takes a random amount of time
+
+*/
+var Util = require("../../lib/common-util");
+var schedule = require("../../lib/schedule")();
+var nThen = require("nthen");
+
+var rand = function (n) {
+    return Math.floor(Math.random() * n);
+};
+
+var rand_time = function () {
+    // between 25 and 324
+    return rand(300) + 25;
+};
+
+var makeAction = function (type) {
+    var i = 0;
+    return function (time) {
+        var j = i++;
+        return function (next) {
+            console.log("  Beginning action: %s#%s", type, j);
+            setTimeout(function () {
+                console.log("  Completed action: %s#%s", type, j);
+                next();
+            }, time);
+            return j;
+        };
+    };
+};
+
+var TYPES = ['WRITE', 'READ', 'APPEND'];
+var chooseAction = function () {
+    var n = rand(100);
+
+    if (n < 50) { return 'APPEND'; }
+    if (n < 90) { return 'READ'; }
+    return 'WRITE';
+
+    //return TYPES[rand(3)];
+};
+
+var test = function (script, cb) {
+    var uid = Util.uid();
+
+    var TO_RUN = script.length;
+    var total_run = 0;
+
+    var parallel = 0;
+    var last_run_ordered = -1;
+    //var i = 0;
+
+    var ACTIONS = {};
+    TYPES.forEach(function (type) {
+        ACTIONS[type] = makeAction(type);
+    });
+
+    nThen(function (w) {
+        setTimeout(w(), 3000);
+        // run scripted actions with assertions
+        script.forEach(function (scene) {
+            var type = scene[0];
+            var time = typeof(scene[1]) === 'number'?
scene[1]: rand_time(); + + var action = ACTIONS[type](time); + console.log("Queuing action of type: %s(%s)", type, time); + + var proceed = w(); + + switch (type) { + case 'APPEND': + return schedule.ordered(uid, w(function (next) { + parallel++; + var temp = action(function () { + parallel--; + total_run++; + proceed(); + next(); + }); + if (temp !== (last_run_ordered + 1)) { + throw new Error("out of order"); + } + last_run_ordered = temp; + })); + case 'WRITE': + return schedule.blocking(uid, w(function (next) { + parallel++; + action(function () { + parallel--; + total_run++; + proceed(); + next(); + }); + if (parallel > 1) { + console.log("parallelism === %s", parallel); + throw new Error("too much parallel"); + } + })); + case 'READ': + return schedule.unordered(uid, w(function (next) { + parallel++; + action(function () { + parallel--; + total_run++; + proceed(); + next(); + }); + })); + default: + throw new Error("wut"); + } + }); + }).nThen(function () { + // make assertions about the whole script + if (total_run !== TO_RUN) { + console.log("Ran %s / %s", total_run, TO_RUN); + throw new Error("skipped tasks"); + } + console.log("total_run === %s", total_run); + + cb(); + }); +}; + + +var randomScript = function () { + var len = rand(15) + 10; + var script = []; + while (len--) { + script.push([ + chooseAction(), + rand_time(), + ]); + } + return script; +}; + +var WRITE = function (t) { + return ['WRITE', t]; +}; +var READ = function (t) { + return ['READ', t]; +}; + +var APPEND = function (t) { + return ['APPEND', t]; +}; + +nThen(function (w) { + test([ + ['READ', 150], + ['APPEND', 200], + ['APPEND', 100], + ['READ', 350], + ['WRITE', 400], + ['APPEND', 275], + ['APPEND', 187], + ['WRITE', 330], + ['WRITE', 264], + ['WRITE', 256], + ], w(function () { + console.log("finished pre-scripted test\n"); + })); +}).nThen(function (w) { + test([ + WRITE(289), + APPEND(281), + READ(207), + WRITE(225), + READ(279), + WRITE(300), + READ(331), + APPEND(341), + APPEND(385), + READ(313), + WRITE(285), + READ(304), + APPEND(273), + APPEND(150), + WRITE(246), + READ(244), + WRITE(172), + APPEND(253), + READ(215), + READ(296), + APPEND(281), + APPEND(296), + WRITE(168), + ], w(function () { + console.log("finished 2nd pre-scripted test\n"); + })); +}).nThen(function () { + var totalTests = 50; + var randomTests = 1; + + var last = nThen(function () { + console.log("beginning randomized tests"); + }); + + var queueRandomTest = function (i) { + last = last.nThen(function (w) { + console.log("running random test script #%s\n", i); + test(randomScript(), w(function () { + console.log("finished random test #%s\n", i); + })); + }); + }; + + while (randomTests <=totalTests) { queueRandomTest(randomTests++); } + + last.nThen(function () { + console.log("finished %s random tests", totalTests); + }); +}); + + diff --git a/server.js b/server.js index 399eb1442..9b82d7c3c 100644 --- a/server.js +++ b/server.js @@ -4,17 +4,14 @@ var Express = require('express'); var Http = require('http'); var Fs = require('fs'); -var WebSocketServer = require('ws').Server; -var NetfluxSrv = require('chainpad-server/NetfluxWebsocketSrv'); var Package = require('./package.json'); var Path = require("path"); var nThen = require("nthen"); +var Util = require("./lib/common-util"); +var Default = require("./lib/defaults"); var config = require("./lib/load-config"); -// support multiple storage back ends -var Storage = require('./storage/file'); - var app = Express(); // mode can be FRESH (default), DEV, or PACKAGE @@ -39,7 
+36,50 @@ if (process.env.PACKAGE) { FRESH_KEY = +new Date(); } +(function () { + // you absolutely must provide an 'httpUnsafeOrigin' + if (typeof(config.httpUnsafeOrigin) !== 'string') { + throw new Error("No 'httpUnsafeOrigin' provided"); + } + + config.httpUnsafeOrigin = config.httpUnsafeOrigin.trim(); + + // fall back to listening on a local address + // if httpAddress is not a string + if (typeof(config.httpAddress) !== 'string') { + config.httpAddress = '127.0.0.1'; + } + + // listen on port 3000 if a valid port number was not provided + if (typeof(config.httpPort) !== 'number' || config.httpPort > 65535) { + config.httpPort = 3000; + } + + if (typeof(config.httpSafeOrigin) !== 'string') { + if (typeof(config.httpSafePort) !== 'number') { + config.httpSafePort = config.httpPort + 1; + } + + if (DEV_MODE) { return; } + console.log(` + m m mm mmmmm mm m mmmmm mm m mmm m + # # # ## # "# #"m # # #"m # m" " # + " #"# # # # #mmmm" # #m # # # #m # # mm # + ## ##" #mm# # "m # # # # # # # # # + # # # # # " # ## mm#mm # ## "mmm" # +`); + + console.log("\nNo 'httpSafeOrigin' provided."); + console.log("Your configuration probably isn't taking advantage of all of CryptPad's security features!"); + console.log("This is acceptable for development, otherwise your users may be at risk.\n"); + + console.log("Serving sandboxed content via port %s.\nThis is probably not what you want for a production instance!\n", config.httpSafePort); + } +}()); + +var configCache = {}; config.flushCache = function () { + configCache = {}; FRESH_KEY = +new Date(); if (!(DEV_MODE || FRESH_MODE)) { FRESH_MODE = true; } if (!config.log) { return; } @@ -49,11 +89,21 @@ config.flushCache = function () { const clone = (x) => (JSON.parse(JSON.stringify(x))); var setHeaders = (function () { - if (typeof(config.httpHeaders) !== 'object') { return function () {}; } + // load the default http headers unless the admin has provided their own via the config file + var headers; + + var custom = config.httpHeaders; + // if the admin provided valid http headers then use them + if (custom && typeof(custom) === 'object' && !Array.isArray(custom)) { + headers = clone(custom); + } else { + // otherwise use the default + headers = Default.httpHeaders(); + } - const headers = clone(config.httpHeaders); - if (config.contentSecurity) { - headers['Content-Security-Policy'] = clone(config.contentSecurity); + // next define the base Content Security Policy (CSP) headers + if (typeof(config.contentSecurity) === 'string') { + headers['Content-Security-Policy'] = config.contentSecurity; if (!/;$/.test(headers['Content-Security-Policy'])) { headers['Content-Security-Policy'] += ';' } if (headers['Content-Security-Policy'].indexOf('frame-ancestors') === -1) { // backward compat for those who do not merge the new version of the config @@ -61,19 +111,23 @@ var setHeaders = (function () { // It also fixes the cross-domain iframe. 
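[Editor's aside: for administrators following along, a minimal config.js fragment that satisfies the startup checks introduced above; the domains are examples, not defaults.]

```js
// config/config.js (fragment)
module.exports = {
    httpUnsafeOrigin: 'https://cryptpad.example.com', // required, or the server throws at startup
    httpSafeOrigin: 'https://sandbox.example.com',    // strongly recommended outside DEV_MODE
    httpAddress: '127.0.0.1', // falls back to this if not a string
    httpPort: 3000,           // falls back to 3000 if missing or > 65535
};
```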
headers['Content-Security-Policy'] += "frame-ancestors *;"; } + } else { + // use the default CSP headers constructed with your domain + headers['Content-Security-Policy'] = Default.contentSecurity(config.httpUnsafeOrigin); } + const padHeaders = clone(headers); - if (config.padContentSecurity) { - padHeaders['Content-Security-Policy'] = clone(config.padContentSecurity); + if (typeof(config.padContentSecurity) === 'string') { + padHeaders['Content-Security-Policy'] = config.padContentSecurity; + } else { + padHeaders['Content-Security-Policy'] = Default.padContentSecurity(config.httpUnsafeOrigin); } if (Object.keys(headers).length) { return function (req, res) { const h = [ - /^\/pad(2)?\/inner\.html.*/, + /^\/pad\/inner\.html.*/, /^\/common\/onlyoffice\/.*\/index\.html.*/, - /^\/sheet\/inner\.html.*/, - /^\/ooslide\/inner\.html.*/, - /^\/oodoc\/inner\.html.*/, + /^\/(sheet|ooslide|oodoc)\/inner\.html.*/, ].some((regex) => { return regex.test(req.url) }) ? padHeaders : headers; @@ -117,15 +171,10 @@ app.use(function (req, res, next) { app.use(Express.static(__dirname + '/www')); -Fs.exists(__dirname + "/customize", function (e) { - if (e) { return; } - console.log("Cryptpad is customizable, see customize.dist/readme.md for details"); -}); - // FIXME I think this is a regression caused by a recent PR // correct this hack without breaking the contributor's intended behaviour. -var mainPages = config.mainPages || ['index', 'privacy', 'terms', 'about', 'contact']; +var mainPages = config.mainPages || Default.mainPages(); var mainPagePattern = new RegExp('^\/(' + mainPages.join('|') + ').html$'); app.get(mainPagePattern, Express.static(__dirname + '/customize')); app.get(mainPagePattern, Express.static(__dirname + '/customize.dist')); @@ -155,38 +204,74 @@ try { }); } catch (e) { console.error("Can't parse admin keys"); } -// TODO, cache this /api/config responses instead of re-computing it each time -app.get('/api/config', function(req, res){ - // TODO precompute any data that isn't dynamic to save some CPU time - var host = req.headers.host.replace(/\:[0-9]+/, ''); - res.setHeader('Content-Type', 'text/javascript'); - res.send('define(function(){\n' + [ - 'var obj = ' + JSON.stringify({ - requireConf: { - waitSeconds: 600, - urlArgs: 'ver=' + Package.version + (FRESH_KEY? '-' + FRESH_KEY: '') + (DEV_MODE? '-' + (+new Date()): ''), - }, - removeDonateButton: (config.removeDonateButton === true), - allowSubscriptions: (config.allowSubscriptions === true), - websocketPath: config.externalWebsocketURL, - httpUnsafeOrigin: config.httpUnsafeOrigin.replace(/^\s*/, ''), - adminEmail: config.adminEmail, - adminKeys: admins, - inactiveTime: config.inactiveTime, - supportMailbox: config.supportMailboxPublicKey - }, null, '\t'), - 'obj.httpSafeOrigin = ' + (function () { - if (config.httpSafeOrigin) { return '"' + config.httpSafeOrigin + '"'; } - if (config.httpSafePort) { - return "(function () { return window.location.origin.replace(/\:[0-9]+$/, ':" + - config.httpSafePort + "'); }())"; - } - return 'window.location.origin'; - }()), - 'return obj', - '});' - ].join(';\n')); -}); +var serveConfig = (function () { + // if dev mode: never cache + var cacheString = function () { + return (FRESH_KEY? '-' + FRESH_KEY: '') + (DEV_MODE? 
'-' + (+new Date()): ''); + }; + + var template = function (host) { + return [ + 'define(function(){', + 'var obj = ' + JSON.stringify({ + requireConf: { + waitSeconds: 600, + urlArgs: 'ver=' + Package.version + cacheString(), + }, + removeDonateButton: (config.removeDonateButton === true), + allowSubscriptions: (config.allowSubscriptions === true), + websocketPath: config.externalWebsocketURL, + httpUnsafeOrigin: config.httpUnsafeOrigin, + adminEmail: config.adminEmail, + adminKeys: admins, + inactiveTime: config.inactiveTime, + supportMailbox: config.supportMailboxPublicKey, + maxUploadSize: config.maxUploadSize, + premiumUploadSize: config.premiumUploadSize, + }, null, '\t'), + 'obj.httpSafeOrigin = ' + (function () { + if (config.httpSafeOrigin) { return '"' + config.httpSafeOrigin + '"'; } + if (config.httpSafePort) { + return "(function () { return window.location.origin.replace(/\:[0-9]+$/, ':" + + config.httpSafePort + "'); }())"; + } + return 'window.location.origin'; + }()), + 'return obj', + '});' + ].join(';\n') + }; + + var cleanUp = {}; + + return function (req, res) { + var host = req.headers.host.replace(/\:[0-9]+/, ''); + res.setHeader('Content-Type', 'text/javascript'); + // don't cache anything if you're in dev mode + if (DEV_MODE) { + return void res.send(template(host)); + } + // generate a lookup key for the cache + var cacheKey = host + ':' + cacheString(); + // if there's nothing cached for that key... + if (!configCache[cacheKey]) { + // generate the response and cache it in memory + configCache[cacheKey] = template(host); + // and create a function to conditionally evict cache entries + // which have not been accessed in the last 20 seconds + cleanUp[cacheKey] = Util.throttle(function () { + delete cleanUp[cacheKey]; + delete configCache[cacheKey]; + }, 20000); + } + + // successive calls to this function + cleanUp[cacheKey](); + return void res.send(configCache[cacheKey]); + }; +}()); + +app.get('/api/config', serveConfig); var four04_path = Path.resolve(__dirname + '/customize.dist/404.html'); var custom_four04_path = Path.resolve(__dirname + '/customize/404.html'); @@ -207,81 +292,36 @@ app.use(function (req, res, next) { var httpServer = Http.createServer(app); -httpServer.listen(config.httpPort,config.httpAddress,function(){ - var host = config.httpAddress; - var hostName = !host.indexOf(':') ? '[' + host + ']' : host; - - var port = config.httpPort; - var ps = port === 80? '': ':' + port; - - console.log('[%s] server available http://%s%s', new Date().toISOString(), hostName, ps); -}); -if (config.httpSafePort) { - Http.createServer(app).listen(config.httpSafePort, config.httpAddress); -} - -var wsConfig = { server: httpServer }; +nThen(function (w) { + Fs.exists(__dirname + "/customize", w(function (e) { + if (e) { return; } + console.log("Cryptpad is customizable, see customize.dist/readme.md for details"); + })); +}).nThen(function (w) { + httpServer.listen(config.httpPort,config.httpAddress,function(){ + var host = config.httpAddress; + var hostName = !host.indexOf(':') ? '[' + host + ']' : host; -var rpc; -var historyKeeper; + var port = config.httpPort; + var ps = port === 80? 
@@ -207,81 +292,36 @@ app.use(function (req, res, next) {
 
 var httpServer = Http.createServer(app);
 
-httpServer.listen(config.httpPort,config.httpAddress,function(){
-    var host = config.httpAddress;
-    var hostName = !host.indexOf(':') ? '[' + host + ']' : host;
-
-    var port = config.httpPort;
-    var ps = port === 80? '': ':' + port;
-
-    console.log('[%s] server available http://%s%s', new Date().toISOString(), hostName, ps);
-});
-if (config.httpSafePort) {
-    Http.createServer(app).listen(config.httpSafePort, config.httpAddress);
-}
-
-var wsConfig = { server: httpServer };
+nThen(function (w) {
+    Fs.exists(__dirname + "/customize", w(function (e) {
+        if (e) { return; }
+        console.log("Cryptpad is customizable, see customize.dist/readme.md for details");
+    }));
+}).nThen(function (w) {
+    httpServer.listen(config.httpPort,config.httpAddress,function(){
+        var host = config.httpAddress;
+        var hostName = !host.indexOf(':') ? '[' + host + ']' : host;
 
-var rpc;
-var historyKeeper;
+        var port = config.httpPort;
+        var ps = port === 80? '': ':' + port;
 
-var log;
+        console.log('[%s] server available http://%s%s', new Date().toISOString(), hostName, ps);
+    });
 
-// Initialize logging, the the store, then tasks, then rpc, then history keeper and then start the server
-var nt = nThen(function (w) {
-    // set up logger
-    var Logger = require("./lib/log");
-    //console.log("Loading logging module");
-    Logger.create(config, w(function (_log) {
-        log = config.log = _log;
-    }));
-}).nThen(function (w) {
-    if (config.externalWebsocketURL) {
-        // if you plan to use an external websocket server
-        // then you don't need to load any API services other than the logger.
-        // Just abort.
-        w.abort();
-        return;
+    if (config.httpSafePort) {
+        Http.createServer(app).listen(config.httpSafePort, config.httpAddress, w());
     }
-    Storage.create(config, w(function (_store) {
-        config.store = _store;
-    }));
-}).nThen(function (w) {
-    var Tasks = require("./storage/tasks");
-    Tasks.create(config, w(function (e, tasks) {
-        if (e) {
-            throw e;
-        }
-        config.tasks = tasks;
-        if (config.disableIntegratedTasks) { return; }
-        setInterval(function () {
-            tasks.runAll(function (err) {
-                if (err) {
-                    // either TASK_CONCURRENCY or an error with tasks.list
-                    // in either case it is already logged.
-                }
-            });
-        }, 1000 * 60 * 5); // run every five minutes
-    }));
-}).nThen(function (w) {
-    require("./rpc").create(config, w(function (e, _rpc) {
-        if (e) {
-            w.abort();
-            throw e;
-        }
-        rpc = _rpc;
-    }));
 }).nThen(function () {
-    var HK = require('./historyKeeper.js');
-    var hkConfig = {
-        tasks: config.tasks,
-        rpc: rpc,
-        store: config.store,
-        log: log,
-        retainData: Boolean(config.retainData),
-    };
-    historyKeeper = HK.create(hkConfig);
-}).nThen(function () {
-    var wsSrv = new WebSocketServer(wsConfig);
-    NetfluxSrv.run(wsSrv, config, historyKeeper);
+    var wsConfig = { server: httpServer };
+
+    // Initialize logging then start the API server
+    require("./lib/log").create(config, function (_log) {
+        config.log = _log;
+        config.httpServer = httpServer;
+
+        if (config.externalWebsocketURL) { return; }
+        require("./lib/api").create(config);
+    });
 });
+
+
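The startup path now runs through `nthen`, which executes each block only after every callback wrapped with `w()` in the previous block has fired, replacing the longer hand-rolled chain that was removed above. A small illustration of the idiom (the steps and timings are arbitrary):

```js
// Sketch of the nthen idiom: each block runs only after every callback
// wrapped with w() in the previous block has completed.
var nThen = require('nthen');

nThen(function (w) {
    setTimeout(w(function () { console.log('step 1 done'); }), 100);
}).nThen(function (w) {
    // starts strictly after step 1's wrapped callback has fired
    setTimeout(w(), 50);
}).nThen(function () {
    console.log('startup finished');
});
```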
diff --git a/www/admin/app-admin.less b/www/admin/app-admin.less
index 10e178308..84532c6c5 100644
--- a/www/admin/app-admin.less
+++ b/www/admin/app-admin.less
@@ -23,5 +23,29 @@
         display: flex;
         flex-flow: column;
     }
+
+    .cp-support-list-actions {
+        margin: 10px 0px 10px 2px;
+    }
+
+    .cp-support-list-ticket:not(.cp-support-list-closed) {
+        .cp-support-list-message {
+            &:last-child:not(.cp-support-fromadmin) {
+                color: @colortheme_cp-red;
+                background-color: lighten(@colortheme_cp-red, 25%);
+                .cp-support-showdata {
+                    background-color: lighten(@colortheme_cp-red, 30%);
+                }
+            }
+        }
+    }
+
+    .cp-support-fromadmin {
+        color: @colortheme_logo-2;
+        background-color: #FFF;
+        .cp-support-message-content {
+            color: @colortheme_logo-2;
+        }
+    }
 }
diff --git a/www/admin/inner.js b/www/admin/inner.js
index 7478442c5..ad7083c6c 100644
--- a/www/admin/inner.js
+++ b/www/admin/inner.js
@@ -172,6 +172,12 @@ define([
         if (!supportKey || !APP.privateKey) { return; }
         var $container = makeBlock('support-list');
         var $div = $(h('div.cp-support-container')).appendTo($container);
+
+        var metadataMgr = common.getMetadataMgr();
+        var privateData = metadataMgr.getPrivateData();
+        var cat = privateData.category || '';
+        var linkedId = cat.indexOf('-') !== -1 && cat.slice(8);
+
         var hashesById = {};
 
         var reorder = function () {
@@ -200,6 +206,12 @@
             });
         };
 
+        var to = Util.throttle(function () {
+            var $ticket = $div.find('.cp-support-list-ticket[data-id="'+linkedId+'"]');
+            $ticket[0].scrollIntoView();
+            linkedId = undefined;
+        }, 100);
+
         // Register to the "support" mailbox
         common.mailbox.subscribe(['supportadmin'], {
             onMessage: function (data) {
@@ -246,6 +258,8 @@
                 }
                 $ticket.append(APP.support.makeMessage(content, hash));
                 reorder();
+
+                if (linkedId) { to(); }
             }
         });
         return $container;
@@ -312,6 +326,9 @@
         var metadataMgr = common.getMetadataMgr();
         var privateData = metadataMgr.getPrivateData();
         var active = privateData.category || 'general';
+        if (active.indexOf('-') !== -1) {
+            active = active.split('-')[0];
+        }
         common.setHash(active);
         Object.keys(categories).forEach(function (key) {
             var $category = $('<div>', {'class': 'cp-sidebarlayout-category'}).appendTo($categories);
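With these changes the admin panel accepts a deep link such as `#support-<ticketId>`: the part before the dash selects the category tab and the remainder names the ticket to scroll to (note that the diff's `cat.slice(8)` hardcodes the length of the `support-` prefix). A generic sketch of that split, with a hypothetical helper name:

```js
// Sketch: split an admin deep link of the form 'support-<ticketId>'.
function parseAdminHash(cat) {
    var idx = cat.indexOf('-');
    if (idx === -1) { return { category: cat }; }
    return {
        category: cat.slice(0, idx),   // e.g. 'support' selects the tab
        linkedId: cat.slice(idx + 1)   // e.g. the ticket id to scroll to
    };
}
// parseAdminHash('support-abc123') => { category: 'support', linkedId: 'abc123' }
```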
diff --git a/www/code/app-code.less b/www/code/app-code.less
index 219143a34..aa2191be0 100644
--- a/www/code/app-code.less
+++ b/www/code/app-code.less
@@ -65,47 +65,20 @@
         box-sizing: border-box;
         //font-family: Calibri,Ubuntu,sans-serif;
         font: @colortheme_app-font;
-        word-wrap: break-word;
         position: relative;
         flex: 1;
 
-        h1, h2, h3, h4, h5, h6 {
-            font-weight: bold;
-            padding-bottom: 0.3em;
-            border-bottom: 1px solid #eee;
-        }
-        li {
-            min-height: 22px;
-        }
+
+        .markdown_main();
+        .markdown_cryptpad();
 
         .todo-list-item {
-            list-style: none;
             .fa {
-                position: absolute;
-                margin-left: -17px;
-                margin-top: 4px;
                 &.fa-check-square {
                     font-size: 15px;
                     margin-top: 5px;
                 }
             }
         }
-        media-tag {
-            * {
-                max-width:100%;
-            }
-            iframe[src$=".pdf"] {
-                width: 100%;
-                height: 80vh;
-                max-height: 90vh;
-            }
-        }
-        media-tag:empty {
-            width: 100px;
-            height: 100px;
-            display: inline-block;
-            border: 1px solid #BBB;
-        }
-        .markdown_main();
 
         .cp-app-code-preview-empty {
             display: none;
         }
diff --git a/www/code/inner.js b/www/code/inner.js
index e933bd4bd..5b517c74c 100644
--- a/www/code/inner.js
+++ b/www/code/inner.js
@@ -170,6 +170,14 @@ define([
             e.preventDefault();
             var $a = $t.is('a') ? $t : $t.parents('a').first();
             var href = $a.attr('href');
+            if (/^\/[^\/]/.test(href)) {
+                var privateData = framework._.cpNfInner.metadataMgr.getPrivateData();
+                href = privateData.origin + href;
+            } else if (/^#/.test(href)) {
+                var target = document.getElementById('cp-md-0-'+href.slice(1));
+                if (target) { target.scrollIntoView(); }
+                return;
+            }
             framework._.sfCommon.openUnsafeURL(href);
         }
     });
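Clicks in the rendered markdown preview are now intercepted: instance-relative links (`/foo`) are resolved against the pad's origin before opening, and `#anchor` links scroll to the matching heading, whose id the preview renderer prefixes with `cp-md-0-`. A condensed sketch of that dispatch (`openURL` stands in for CryptPad's `openUnsafeURL` helper):

```js
// Sketch: route a clicked preview link to the right handler.
function handlePreviewLink(href, origin, openURL) {
    if (/^\/[^\/]/.test(href)) {
        // instance-relative link: make it absolute before opening
        return openURL(origin + href);
    }
    if (/^#/.test(href)) {
        // in-document anchor: heading ids are namespaced by the renderer
        var target = document.getElementById('cp-md-0-' + href.slice(1));
        if (target) { target.scrollIntoView(); }
        return;
    }
    openURL(href); // external link
}
```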
diff --git a/www/common/common-hash.js b/www/common/common-hash.js
index 85ec3b36e..b60ab3306 100644
--- a/www/common/common-hash.js
+++ b/www/common/common-hash.js
@@ -60,6 +60,23 @@ var factory = function (Util, Crypto, Nacl) {
             return '/2/' + secret.type + '/view/' + Crypto.b64RemoveSlashes(data.viewKeyStr) + '/' + pass;
         }
     };
+
+    Hash.getHiddenHashFromKeys = function (type, secret, opts) {
+        opts = opts || {};
+        var canEdit = (secret.keys && secret.keys.editKeyStr) || secret.key;
+        var mode = (!opts.view && canEdit) ? 'edit/' : 'view/';
+        var pass = secret.password ? 'p/' : '';
+
+        if (secret.keys && secret.keys.fileKeyStr) { mode = ''; }
+
+        var hash = '/3/' + type + '/' + mode + secret.channel + '/' + pass;
+        var hashData = Hash.parseTypeHash(type, hash);
+        if (hashData && hashData.getHash) {
+            return hashData.getHash(opts || {});
+        }
+        return hash;
+    };
+
     var getFileHashFromKeys = Hash.getFileHashFromKeys = function (secret) {
         var version = secret.version;
         var data = secret.keys;
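A version-3 "hidden" hash carries the application, access mode, and channel id, but no encryption key, so it can sit in the address bar and browser history without leaking the key. An illustration of what the function above produces, using hypothetical values:

```js
// Illustration (hypothetical channel id): a version-3 hidden hash
// contains type, mode and channel, but no decryption key.
var secret = {
    channel: 'f1ee00000000000000000000000000ab', // 32-char pad channel id
    keys: { editKeyStr: '...' },                 // edit key present => 'edit/' mode
    password: false
};
// Hash.getHiddenHashFromKeys('pad', secret, {})
// => '/3/pad/edit/f1ee00000000000000000000000000ab/'
```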
@@ -160,12 +177,28 @@ Version 1
     };
     var parseTypeHash = Hash.parseTypeHash = function (type, hash) {
         if (!hash) { return; }
-        var options;
+        var options = [];
         var parsed = {};
         var hashArr = fixDuplicateSlashes(hash).split('/');
+
+        var addOptions = function () {
+            parsed.password = options.indexOf('p') !== -1;
+            parsed.present = options.indexOf('present') !== -1;
+            parsed.embed = options.indexOf('embed') !== -1;
+            parsed.ownerKey = getOwnerKey(options);
+        };
+
         if (['media', 'file', 'user', 'invite'].indexOf(type) === -1) {
             parsed.type = 'pad';
             parsed.getHash = function () { return hash; };
+            parsed.getOptions = function () {
+                return {
+                    embed: parsed.embed,
+                    present: parsed.present,
+                    ownerKey: parsed.ownerKey,
+                    password: parsed.password
+                };
+            };
             if (hash.slice(0,1) !== '/' && hash.length >= 56) { // Version 0
                 // Old hash
                 parsed.channel = hash.slice(0, 32);
@@ -173,6 +206,18 @@ Version 1
                 parsed.version = 0;
                 return parsed;
             }
+
+            // Version >= 1: more hash options
+            parsed.getHash = function (opts) {
+                var hash = hashArr.slice(0, 5).join('/') + '/';
+                var owner = typeof(opts.ownerKey) !== "undefined" ? opts.ownerKey : parsed.ownerKey;
+                if (owner) { hash += owner + '/'; }
+                if (parsed.password || opts.password) { hash += 'p/'; }
+                if (opts.embed) { hash += 'embed/'; }
+                if (opts.present) { hash += 'present/'; }
+                return hash;
+            };
+
             if (hashArr[1] && hashArr[1] === '1') { // Version 1
                 parsed.version = 1;
                 parsed.mode = hashArr[2];
@@ -180,18 +225,8 @@ Version 1
                 parsed.key = Crypto.b64AddSlashes(hashArr[4]);
 
                 options = hashArr.slice(5);
-                parsed.present = options.indexOf('present') !== -1;
-                parsed.embed = options.indexOf('embed') !== -1;
-                parsed.ownerKey = getOwnerKey(options);
-
-                parsed.getHash = function (opts) {
-                    var hash = hashArr.slice(0, 5).join('/') + '/';
-                    var owner = typeof(opts.ownerKey) !== "undefined" ? opts.ownerKey : parsed.ownerKey;
-                    if (owner) { hash += owner + '/'; }
-                    if (opts.embed) { hash += 'embed/'; }
-                    if (opts.present) { hash += 'present/'; }
-                    return hash;
-                };
+                addOptions();
+
                 return parsed;
             }
             if (hashArr[1] && hashArr[1] === '2') { // Version 2
@@ -201,20 +236,19 @@ Version 1
                 parsed.key = hashArr[4];
 
                 options = hashArr.slice(5);
-                parsed.password = options.indexOf('p') !== -1;
-                parsed.present = options.indexOf('present') !== -1;
-                parsed.embed = options.indexOf('embed') !== -1;
-                parsed.ownerKey = getOwnerKey(options);
-
-                parsed.getHash = function (opts) {
-                    var hash = hashArr.slice(0, 5).join('/') + '/';
-                    var owner = typeof(opts.ownerKey) !== "undefined" ? opts.ownerKey : parsed.ownerKey;
-                    if (owner) { hash += owner + '/'; }
-                    if (parsed.password) { hash += 'p/'; }
-                    if (opts.embed) { hash += 'embed/'; }
-                    if (opts.present) { hash += 'present/'; }
-                    return hash;
-                };
+                addOptions();
+
+                return parsed;
+            }
+            if (hashArr[1] && hashArr[1] === '3') { // Version 3: hidden hash
+                parsed.version = 3;
+                parsed.app = hashArr[2];
+                parsed.mode = hashArr[3];
+                parsed.channel = hashArr[4];
+
+                options = hashArr.slice(5);
+                addOptions();
+                return parsed;
             }
             return parsed;
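`addOptions` centralizes the flag parsing that was previously duplicated per version, and the new `getOptions`/`getHash` pair lets callers read a hash's options and rebuild a variant of it. A hypothetical round trip (assuming `getOwnerKey` finds no owner key among these options):

```js
// Illustration (hypothetical key value): options round-trip on a v2 pad hash.
var parsed = Hash.parseTypeHash('pad', '/2/pad/edit/abcdef/p/embed/');
parsed.getOptions();
// => { embed: true, present: false, ownerKey: undefined, password: true }
parsed.getHash({ embed: false, present: true });
// => '/2/pad/edit/abcdef/p/present/'
```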
@@ -222,34 +256,54 @@ Version 1
         parsed.getHash = function () { return hashArr.join('/'); };
         if (['media', 'file'].indexOf(type) !== -1) {
             parsed.type = 'file';
+
+            parsed.getOptions = function () {
+                return {
+                    embed: parsed.embed,
+                    present: parsed.present,
+                    ownerKey: parsed.ownerKey,
+                    password: parsed.password
+                };
+            };
+
+            parsed.getHash = function (opts) {
+                var hash = hashArr.slice(0, 4).join('/') + '/';
+                var owner = typeof(opts.ownerKey) !== "undefined" ? opts.ownerKey : parsed.ownerKey;
+                if (owner) { hash += owner + '/'; }
+                if (parsed.password || opts.password) { hash += 'p/'; }
+                if (opts.embed) { hash += 'embed/'; }
+                if (opts.present) { hash += 'present/'; }
+                return hash;
+            };
+
             if (hashArr[1] && hashArr[1] === '1') {
                 parsed.version = 1;
                 parsed.channel = hashArr[2].replace(/-/g, '/');
                 parsed.key = hashArr[3].replace(/-/g, '/');
 
                 options = hashArr.slice(4);
-                parsed.ownerKey = getOwnerKey(options);
+                addOptions();
                 return parsed;
             }
+
             if (hashArr[1] && hashArr[1] === '2') { // Version 2
                 parsed.version = 2;
                 parsed.app = hashArr[2];
                 parsed.key = hashArr[3];
 
                 options = hashArr.slice(4);
-                parsed.password = options.indexOf('p') !== -1;
-                parsed.present = options.indexOf('present') !== -1;
-                parsed.embed = options.indexOf('embed') !== -1;
-                parsed.ownerKey = getOwnerKey(options);
-
-                parsed.getHash = function (opts) {
-                    var hash = hashArr.slice(0, 4).join('/') + '/';
-                    var owner = typeof(opts.ownerKey) !== "undefined" ? opts.ownerKey : parsed.ownerKey;
-                    if (owner) { hash += owner + '/'; }
-                    if (parsed.password) { hash += 'p/'; }
-                    if (opts.embed) { hash += 'embed/'; }
-                    if (opts.present) { hash += 'present/'; }
-                    return hash;
-                };
+                addOptions();
+
+                return parsed;
+            }
+
+            if (hashArr[1] && hashArr[1] === '3') { // Version 3: hidden hash
+                parsed.version = 3;
+                parsed.app = hashArr[2];
+                parsed.channel = hashArr[3];
+
+                options = hashArr.slice(4);
+                addOptions();
+                return parsed;
             }
             return parsed;
@@ -303,6 +357,10 @@ Version 1
             url += '#' + hash;
             return url;
         };
+        ret.getOptions = function () {
+            if (!ret.hashData || !ret.hashData.getOptions) { return {}; }
+            return ret.hashData.getOptions();
+        };
 
         if (!/^https*:\/\//.test(href)) {
             idx = href.indexOf('/#');
@@ -325,6 +383,14 @@ Version 1
         return ret;
     };
 
+    Hash.hashToHref = function (hash, type) {
+        return '/' + type + '/#' + hash;
+    };
+    Hash.hrefToHash = function (href) {
+        var parsed = Hash.parsePadUrl(href);
+        return parsed.hash;
+    };
+
     Hash.getRelativeHref = function (href) {
         if (!href) { return; }
         if (href.indexOf('#') === -1) { return; }
@@ -345,7 +411,7 @@ Version 1
         secret.version = 2;
         secret.type = type;
     };
-    if (!secretHash && !window.location.hash) { //!/#/.test(window.location.href)) {
+    if (!secretHash) {
         generate();
         return secret;
     } else {
@@ -355,12 +421,7 @@ Version 1
             if (!type) { throw new Error("getSecrets with a hash requires a type parameter"); }
             parsed = parseTypeHash(type, secretHash);
             hash = secretHash;
-        } else {
-            var pHref = parsePadUrl(window.location.href);
-            parsed = pHref.hashData;
-            hash = pHref.hash;
         }
-        //var hash = secretHash || window.location.hash.slice(1);
         if (hash.length === 0) {
             generate();
             return secret;
@@ -496,8 +557,8 @@ Version 1
         if (typeof(parsed.hashData.version) === "undefined") { return; }
         // pads and files should have a base64 (or hex) key
         if (parsed.hashData.type === 'pad' || parsed.hashData.type === 'file') {
-            if (!parsed.hashData.key) { return; }
-            if (!/^[a-zA-Z0-9+-/=]+$/.test(parsed.hashData.key)) { return; }
+            if (!parsed.hashData.key && !parsed.hashData.channel) { return; }
+            if (parsed.hashData.key && !/^[a-zA-Z0-9+-/=]+$/.test(parsed.hashData.key)) { return; }
         }
     }
     return true;
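The new `hashToHref` and `hrefToHash` helpers convert between a bare hash and an app-relative href, and are intended to round-trip through `parsePadUrl`. Reusing the hypothetical hidden hash from earlier:

```js
// Illustration (hypothetical hash): converting between hash and href.
var hash = '/3/pad/edit/f1ee00000000000000000000000000ab/';
var href = Hash.hashToHref(hash, 'pad');
// => '/pad/#/3/pad/edit/f1ee00000000000000000000000000ab/'
Hash.hrefToHash(href);
// => '/3/pad/edit/f1ee00000000000000000000000000ab/' (round-trips via parsePadUrl)
```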
diff --git a/www/common/common-interface.js b/www/common/common-interface.js
index 9cc1c1efb..6f9b4bed2 100644
--- a/www/common/common-interface.js
+++ b/www/common/common-interface.js
@@ -70,6 +70,7 @@ define([
                 if (typeof(yes) === 'function') { yes(e); }
                 break;
             }
+            $(el || window).off('keydown', handler);
         };
         $(el || window).keydown(handler);
 
@@ -197,7 +198,7 @@ define([
         frame.closeModal = function (cb) {
             $frame.fadeOut(150, function () {
                 $frame.detach();
-                cb();
+                if (typeof(cb) === "function") { cb(); }
             });
         };
         return $frame.click(function (e) {
@@ -217,14 +218,15 @@ define([
         var titles = [];
         var active = 0;
         tabs.forEach(function (tab, i) {
-            if (!tab.content || !tab.title) { return; }
+            if (!(tab.content || tab.disabled) || !tab.title) { return; }
             var content = h('div.alertify-tabs-content', tab.content);
-            var title = h('span.alertify-tabs-title', tab.title);
+            var title = h('span.alertify-tabs-title'+ (tab.disabled ? '.disabled' : ''), tab.title);
             if (tab.icon) {
                 var icon = h('i', {class: tab.icon});
                 $(title).prepend(' ').prepend(icon);
             }
             $(title).click(function () {
+                if (tab.disabled) { return; }
                 var old = tabs[active];
                 if (old.onHide) { old.onHide(); }
                 titles.forEach(function (t) { $(t).removeClass('alertify-tabs-active'); });
@@ -238,7 +240,7 @@ define([
             });
             titles.push(title);
             contents.push(content);
-            if (tab.active) { active = i; }
+            if (tab.active && !tab.disabled) { active = i; }
         });
         if (contents.length) {
             $(contents[active]).addClass('alertify-tabs-content-active');
@@ -384,7 +386,7 @@ define([
         buttons.forEach(function (b) {
             if (!b.name || !b.onClick) { return; }
             var button = h('button', { tabindex: '1', 'class': b.className || '' }, b.name);
-            $(button).click(function () {
+            var todo = function () {
                 var noClose = b.onClick();
                 if (noClose) { return; }
                 var $modal = $(button).parents('.alertify').first();
@@ -395,7 +397,17 @@ define([
                     }
                 });
             }
-            });
+            };
+            if (b.confirm) {
+                UI.confirmButton(button, {
+                    classes: 'danger',
+                    divClasses: 'left'
+                }, todo);
+            } else {
+                $(button).click(function () {
+                    todo();
+                });
+            }
             if (b.keys && b.keys.length) { $(button).attr('data-keys', JSON.stringify(b.keys)); }
             navs.push(button);
         });
@@ -483,7 +495,7 @@ define([
             stopListening(listener);
             cb();
         });
-        listener = listenForKeys(close, close);
+        listener = listenForKeys(close, close, frame);
         var $ok = $(ok).click(close);
 
         document.body.appendChild(frame);
@@ -491,6 +503,11 @@ define([
             $ok.focus();
             Notifier.notify();
         });
+
+        return {
+            element: frame,
+            delete: close
+        };
     };
 
     UI.prompt = function (msg, def, cb, opt, force) {
@@ -582,7 +599,7 @@ define([
             $ok.click();
         }, function () {
             $cancel.click();
-        }, ok);
+        }, frame);
 
         document.body.appendChild(frame);
         setTimeout(function () {
@@ -593,6 +610,70 @@ define([
             }
         });
     };
+
+    UI.confirmButton = function (originalBtn, config, _cb) {
+        config = config || {};
+        var cb = Util.once(Util.mkAsync(_cb));
+        var classes = 'btn ' + (config.classes || 'btn-primary');
+
+        var button = h('button', {
+            "class": classes,
+            title: config.title || ''
+        }, Messages.areYouSure);
+        var $button = $(button);
+
+        var div = h('div', {
+            "class": config.classes || ''
+        });
+        var timer = h('div.cp-button-timer', div);
+
+        var content = h('div.cp-button-confirm', [
+            button,
+            timer
+        ]);
+        if (config.divClasses) {
+            $(content).addClass(config.divClasses);
+        }
+
+        var to;
+
+        var done = function (res) {
+            if (res) { cb(res); }
+            clearTimeout(to);
+            $(content).detach();
+            $(originalBtn).show();
+        };
+
+        $button.click(function () {
+            done(true);
+        });
+
+        var TIMEOUT = 3000;
+        var INTERVAL = 10;
+        var i = 1;
+
+        var todo = function () {
+            var p = 100 * ((TIMEOUT - (i * INTERVAL)) / TIMEOUT);
+            if (i++ * INTERVAL >= TIMEOUT) {
+                done(false);
+                return;
+            }
+            $(div).css('width', p+'%');
+            to = setTimeout(todo, INTERVAL);
+        };
+
+        $(originalBtn).addClass('cp-button-confirm-placeholder').click(function () {
+            i = 1;
+            to = setTimeout(todo, INTERVAL);
+            $(originalBtn).hide().after(content);
+        });
+
+        return {
+            reset: function () {
+                done(false);
+            }
+        };
+    };
+
     UI.proposal = function (content, cb) {
         var buttons = [{
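`UI.confirmButton` wraps a destructive button with an inline "are you sure?" step backed by a shrinking three-second countdown bar (`TIMEOUT = 3000`): clicking again confirms, letting the timer run out cancels. A usage sketch with a hypothetical delete button (`h` is the hyperscript helper used throughout this file):

```js
// Usage sketch (hypothetical button): the first click swaps in an
// "are you sure?" button that auto-cancels after the 3s timer expires.
var deleteBtn = h('button.btn.btn-danger', 'Delete');
var confirm = UI.confirmButton(deleteBtn, { classes: 'danger' }, function () {
    // only reached if the user clicks again within 3 seconds
    console.log('confirmed: deleting');
});
// confirm.reset() restores the original button without confirming
```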
@@ -1050,39 +1131,36 @@ define([
         return radio;
     };
 
+    var corner = {
+        queue: [],
+        state: false
+    };
     UI.cornerPopup = function (text, actions, footer, opts) {
         opts = opts || {};
 
-        var minimize = h('div.cp-corner-minimize.fa.fa-window-minimize');
-        var maximize = h('div.cp-corner-maximize.fa.fa-window-maximize');
+        var dontShowAgain = h('div.cp-corner-dontshow', [
+            h('span.fa.fa-times'),
+            Messages.dontShowAgain
+        ]);
+
         var popup = h('div.cp-corner-container', [
-            minimize,
-            maximize,
-            h('div.cp-corner-filler', { style: "width:110px;" }),
-            h('div.cp-corner-filler', { style: "width:80px;" }),
-            h('div.cp-corner-filler', { style: "width:60px;" }),
-            h('div.cp-corner-filler', { style: "width:40px;" }),
-            h('div.cp-corner-filler', { style: "width:20px;" }),
             setHTML(h('div.cp-corner-text'), text),
             h('div.cp-corner-actions', actions),
-            setHTML(h('div.cp-corner-footer'), footer)
+            setHTML(h('div.cp-corner-footer'), footer),
+            opts.dontShowAgain ? dontShowAgain : undefined
         ]);
         var $popup = $(popup);
 
-        $(minimize).click(function () {
-            $popup.addClass('cp-minimized');
-        });
-        $(maximize).click(function () {
-            $popup.removeClass('cp-minimized');
-        });
-
         if (opts.hidden) {
             $popup.addClass('cp-minimized');
         }
         if (opts.big) {
             $popup.addClass('cp-corner-big');
         }
+        if (opts.alt) {
+            $popup.addClass('cp-corner-alt');
+        }
 
         var hide = function () {
             $popup.hide();
@@ -1092,9 +1170,35 @@ define([
         };
         var deletePopup = function () {
             $popup.remove();
+            if (!corner.queue.length) {
+                // Make sure no other popup is displayed in the next 5s
+                setTimeout(function () {
+                    if (corner.queue.length) {
+                        $('body').append(corner.queue.pop());
+                        return;
+                    }
+                    corner.state = false;
+                }, 5000);
+                return;
+            }
+            setTimeout(function () {
+                $('body').append(corner.queue.pop());
+            }, 5000);
         };
 
-        $('body').append(popup);
+        $(dontShowAgain).click(function () {
+            deletePopup();
+            if (typeof(opts.dontShowAgain) === "function") {
+                opts.dontShowAgain();
+            }
+        });
+
+        if (corner.state) {
+            corner.queue.push(popup);
+        } else {
+            corner.state = true;
+            $('body').append(popup);
+        }
 
         return {
             popup: popup,
@@ -1104,5 +1208,49 @@ define([
         };
     };
 
+    UI.makeSpinner = function ($container) {
+        var $ok = $('<span>', {'class': 'fa fa-check', title: Messages.saved}).hide();
+        var $spinner = $('<span>', {'class': 'fa fa-spinner fa-pulse'}).hide();
+
+        var state = false;
+        var to;
+
+        var spin = function () {
+            clearTimeout(to);
+            state = true;
+            $ok.hide();
+            $spinner.show();
+        };
+        var hide = function () {
+            clearTimeout(to);
+            state = false;
+            $ok.hide();
+            $spinner.hide();
+        };
+        var done = function () {
+            clearTimeout(to);
+            state = false;
+            $ok.show();
+            $spinner.hide();
+            to = setTimeout(function () {
+                $ok.hide();
+            }, 500);
+        };
+
+        if ($container && $container.append) {
+            $container.append($ok);
+            $container.append($spinner);
+        }
+
+        return {
+            getState: function () { return state; },
+            ok: $ok[0],
+            spinner: $spinner[0],
+            spin: spin,
+            hide: hide,
+            done: done
+        };
+    };
+
     return UI;
 });
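`UI.makeSpinner` returns a small state machine for save indicators: `spin()` shows a pulsing spinner while work is in flight, `done()` flashes a check mark for 500 ms, and `hide()` clears both. A hedged usage sketch (the `$toolbar` container and `save` function are hypothetical):

```js
// Usage sketch (hypothetical container and save call):
var spinner = UI.makeSpinner($toolbar);
spinner.spin();                      // show the pulsing spinner
save(function (err) {
    if (err) { return void spinner.hide(); }
    spinner.done();                  // flash the check mark for 500ms
});
```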
diff --git a/www/common/common-messaging.js b/www/common/common-messaging.js
index feb3d79d3..15d0408f7 100644
--- a/www/common/common-messaging.js
+++ b/www/common/common-messaging.js
@@ -53,10 +53,18 @@ define([
         return list;
     };
 
+    Msg.declineFriendRequest = function (store, data, cb) {
+        store.mailbox.sendTo('DECLINE_FRIEND_REQUEST', {}, {
+            channel: data.notifications,
+            curvePublic: data.curvePublic
+        }, function (obj) {
+            cb(obj);
+        });
+    };
     Msg.acceptFriendRequest = function (store, data, cb) {
         var friend = getFriend(store.proxy, data.curvePublic) || {};
         var myData = createData(store.proxy, friend.channel || data.channel);
-        store.mailbox.sendTo('ACCEPT_FRIEND_REQUEST', myData, {
+        store.mailbox.sendTo('ACCEPT_FRIEND_REQUEST', { user: myData }, {
             channel: data.notifications,
             curvePublic: data.curvePublic
         }, function (obj) {
@@ -110,7 +118,7 @@ define([
         var proxy = store.proxy;
         var friend = proxy.friends[curvePublic];
         if (!friend) { return void cb({error: 'ENOENT'}); }
-        if (!friend.notifications || !friend.channel) { return void cb({error: 'EINVAL'}); }
+        if (!friend.notifications) { return void cb({error: 'EINVAL'}); }
 
         store.mailbox.sendTo('UNFRIEND', {
             curvePublic: proxy.curvePublic
diff --git a/www/common/common-ui-elements.js b/www/common/common-ui-elements.js
index eb1169994..f8a3711c0 100644
--- a/www/common/common-ui-elements.js
+++ b/www/common/common-ui-elements.js
@@ -14,7 +14,7 @@ define([
     '/customize/application_config.js',
     '/customize/pages.js',
     '/bower_components/nthen/index.js',
-    '/common/invitation.js',
+    '/common/inner/invitation.js',
 
     'css!/customize/fonts/cptools/style.css',
     '/bower_components/croppie/croppie.min.js',
@@ -30,6 +30,13 @@ define([
         });
     }
 
+    UIElements.prettySize = function (bytes) {
+        var kB = Util.bytesToKilobytes(bytes);
+        if (kB < 1024) { return kB + Messages.KB; }
+        var mB = Util.bytesToMegabytes(bytes);
+        return mB + Messages.MB;
+    };
+
     UIElements.updateTags = function (common, href) {
         var existing, tags;
         NThen(function(waitFor) {
@@ -56,6 +63,21 @@ define([
         });
     };
 
+    var dcAlert;
+    UIElements.disconnectAlert = function () {
+        if (dcAlert && $(dcAlert.element).length) { return; }
+        dcAlert = UI.alert(Messages.common_connectionLost, undefined, true);
+    };
+    UIElements.reconnectAlert = function () {
+        if (!dcAlert) { return; }
+        if (!dcAlert.delete) {
+            dcAlert = undefined;
+            return;
+        }
+        dcAlert.delete();
+        dcAlert = undefined;
+    };
+
     var importContent = function (type, f, cfg) {
         return function () {
             var $files = $('<input>', {type:"file"});
@@ -77,7 +99,7 @@ define([
             });
         };
     };
-
+/*
     var getPropertiesData = function (common, cb) {
         var data = {};
         NThen(function (waitFor) {
@@ -105,6 +127,43 @@ define([
             cb(void 0, data);
         });
     };
+*/
+    var getPropertiesData = function (common, opts, cb) {
+        opts = opts || {};
+        var data = {};
+        NThen(function (waitFor) {
+            var base = common.getMetadataMgr().getPrivateData().origin;
+            common.getPadAttribute('', waitFor(function (err, val) {
+                if (err || !val) {
+                    waitFor.abort();
+                    return void cb(err || 'EEMPTY');
+                }
+                if (!val.fileType) {
+                    delete val.owners;
+                    delete val.expire;
+                }
+                Util.extend(data, val);
+                if (data.href) { data.href = base + data.href; }
+                if (data.roHref) { data.roHref = base + data.roHref; }
+            }), opts.href);
+
+            // If this is a file, don't try to look for metadata
+            if (opts.channel && opts.channel.length > 34) { return; }
+            common.getPadMetadata({
+                channel: opts.channel // optional, fallback to current pad
+            }, waitFor(function (obj) {
+                if (obj && obj.error) { return; }
+                data.owners = obj.owners;
+                data.expire = obj.expire;
+                data.pending_owners = obj.pending_owners;
+            }));
+        }).nThen(function () {
+            cb(void 0, data);
+        });
+    };
+
+
+/*
     var createOwnerModal = function (common, data) {
         var friends = common.getFriends(true);
         var sframeChan = common.getSframeChannel();
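The reworked `getPropertiesData` can be pointed at a pad other than the current one via `opts`, and it skips the pad-metadata lookup for files; the `opts.channel.length > 34` test distinguishes file channels from pad channels by id length. A sketch of that heuristic (the exact id lengths are an assumption inferred from the check, not stated in the diff):

```js
// Sketch: distinguish file channels from pad channels by id length,
// mirroring the `opts.channel.length > 34` check above. The concrete
// lengths used here (32 for pads, 48 for files) are an assumption.
function isFileChannel(channel) {
    return typeof(channel) === 'string' && channel.length > 34;
}

isFileChannel('01234567890123456789012345678901');                 // false: 32-char pad id
isFileChannel('012345678901234567890123456789012345678901234567'); // true: 48-char file id
```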
@@ -212,15 +271,7 @@ define([
                 common.mailbox.sendTo("RM_OWNER", {
                     channel: channel,
                     title: data.title,
-                    pending: pending,
-                    user: {
-                        displayName: user.name,
-                        avatar: user.avatar,
-                        profile: user.profile,
-                        notifications: user.notifications,
-                        curvePublic: user.curvePublic,
-                        edPublic: priv.edPublic
-                    }
+                    pending: pending
                 }, {
                     channel: friend.notifications,
                     curvePublic: friend.curvePublic
@@ -363,15 +414,7 @@ define([
                     channel: channel,
                     href: data.href,
                     password: data.password,
-                    title: data.title,
-                    user: {
-                        displayName: user.name,
-                        avatar: user.avatar,
-                        profile: user.profile,
-                        notifications: user.notifications,
-                        curvePublic: user.curvePublic,
-                        edPublic: priv.edPublic
-                    }
+                    title: data.title
                 }, {
                     channel: friend.notifications,
                     curvePublic: friend.curvePublic
@@ -419,8 +462,8 @@ define([
         var link = h('div.cp-share-columns', [
             div1,
             div2
-            /*drawRemove()[0],
-            drawAdd()[0]*/
+            // drawRemove()[0],
+            //drawAdd()[0]
         ]);
         var linkButtons = [{
             className: 'cancel',
@@ -430,6 +473,8 @@ define([
         }];
         return UI.dialog.customModal(link, {buttons: linkButtons});
     };
+*/
+/*
     var getRightsProperties = function (common, data, cb) {
         var $div = $('<div>');
         if (!data) { return void cb(void 0, $div); }
@@ -449,7 +494,7 @@ define([
             var team = priv.teams[id] || {};
             if (team.viewer) { return; }
             if (data.owners.indexOf(team.edPublic) === -1) { return; }
-            owned = id;
+            owned = Number(id);
             return true;
         });
     }
@@ -548,21 +593,27 @@ define([
 
     if (!data.noPassword) {
         var hasPassword = data.password;
+        var $pwLabel = $('