=================================================================================
FILE: server.js
=================================================================================
/*
 * =================================================================================================
 * HASKELLAI API SERVER - MAIN ENTRY POINT
 * =================================================================================================
 * This file is responsible for configuration, database connections, and starting the servers.
 */

// --- Core Node.js and npm modules ---
const https = require('https');
const fs = require('fs');
const path = require('path');
const express = require('express');
const cookieParser = require('cookie-parser');
const sodium = require('libsodium-wrappers');
const rateLimit = require('express-rate-limit');

// --- Application-specific modules ---
const connectDB = require('./src/config/database');
const apiRouter = require('./src/api/routes');
const maintenanceService = require('./src/services/maintenance.service');
const serverTimingMiddleware = require('./src/api/middleware/serverTiming');
const addStorageHeaders = require('./src/api/middleware/addStorageHeaders');
const authenticateOptional = require('./src/api/middleware/authenticateOptional');
const emailService = require('./src/services/email.service');

// --- Specify the settings directory ---
const SETTINGS_DIR = path.join(process.cwd(), 'settings');

// =================================================================================================
// SECTION 1: CONFIGURATION FROM FILES (As per original server.js)
// =================================================================================================
console.log('Starting HaskellAIjs server...');

const PROD_PORT = Number(fs.readFileSync(path.join(SETTINGS_DIR, 'port.txt'), 'utf8').trim());
const TEST_PORT = PROD_PORT + 1;
console.log(`Production Port: ${PROD_PORT}, Test Port: ${TEST_PORT}`);

let SERVER_DOMAIN = 'localhost'; // Default fallback for safety
try {
  const domainPath = path.join(SETTINGS_DIR, 'serverdomain.txt');
  if (fs.existsSync(domainPath)) {
    SERVER_DOMAIN = fs.readFileSync(domainPath, 'utf8').trim();
    console.log(`Server domain loaded from file: ${SERVER_DOMAIN}`);
  } else {
    throw new Error('serverdomain.txt not found.');
  }
} catch (error) {
  console.error(`CRITICAL ERROR: Could not read serverdomain.txt. ${error.message}`);
  console.error('CORS policy may not work correctly. Please create the file. Exiting.');
  process.exit(1);
}

let PEPPER = 'I think, therefore cayenne.';
const pepperPath = path.join(SETTINGS_DIR, 'pepper.txt');
if (fs.existsSync(pepperPath)) {
  PEPPER = fs.readFileSync(pepperPath, 'utf8').replace(/\r/g, '').replace(/\n*$/, '');
}
// Do not log the pepper itself: it is a secret, and console output often ends
// up in log files. Log only where it came from.
console.log(`Pepper loaded (${fs.existsSync(pepperPath) ? 'from pepper.txt' : 'built-in default'}).`);

let TURN_SECRET = 'default-turn-secret-change-me'; // A fallback
const turnSecretPath = path.join(SETTINGS_DIR, 'turnsecret.txt');
if (fs.existsSync(turnSecretPath)) {
  TURN_SECRET = fs.readFileSync(turnSecretPath, 'utf8').replace(/\r/g, '').replace(/\n*$/, '');
  console.log('TURN server secret loaded successfully.');
} else {
  console.warn('WARNING: turnsecret.txt not found. Using a default, insecure TURN secret.');
}

let siteOwnerEmail = null;
const siteOwnerEmailPath = path.join(SETTINGS_DIR, 'siteowneremail.txt');
if (fs.existsSync(siteOwnerEmailPath)) {
  siteOwnerEmail = fs.readFileSync(siteOwnerEmailPath, 'utf8').trim().split('\n')[0].trim();
  console.log(`Site owner email loaded: ${siteOwnerEmail}`);
} else {
  console.log('No siteowneremail.txt found.');
}

let STORE_PLAINTEXT_EMAILS = false;
const storeEmailsPath = path.join(SETTINGS_DIR, 'storeplaintextemails.txt');
if (fs.existsSync(storeEmailsPath)) {
  STORE_PLAINTEXT_EMAILS = fs.readFileSync(storeEmailsPath, 'utf8').trim().toLowerCase() === 'true';
  if (STORE_PLAINTEXT_EMAILS) console.log('Configuration to store plaintext emails is ENABLED.');
}

let ENABLE_LEGACY_VALIDATION = false;
const legacyValidationPath = path.join(SETTINGS_DIR, 'enablelegacyvalidation.txt');
if (fs.existsSync(legacyValidationPath)) {
  ENABLE_LEGACY_VALIDATION = fs.readFileSync(legacyValidationPath, 'utf8').trim().toLowerCase() === 'true';
  if (ENABLE_LEGACY_VALIDATION) console.log('Legacy session validation fallback is ENABLED.');
}

const LEGACY_SESSIONS_FILE = path.join(SETTINGS_DIR, 'sessions.txt');

const TLS_OPTIONS = {
  key: fs.readFileSync(path.join(SETTINGS_DIR, 'privkey.pem')),
  cert: fs.readFileSync(path.join(SETTINGS_DIR, 'fullchain.pem'))
};
console.log('TLS certificates loaded from settings directory.');

let ALLOW_ALL_REGISTRATIONS = false;
let ALLOWED_DOMAINS = new Set();
let ALLOWED_EMAILS = new Set();
const DEFAULT_GLOBAL_LIMIT = 16 * 1024 * 1024; // 16 MB
let GLOBAL_LIMIT = DEFAULT_GLOBAL_LIMIT;
let DOMAIN_LIMITS = new Map();
let EMAIL_LIMITS = new Map();
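// Illustrative example of the quota file format parsed below (inferred from
// parseQuotaFile; these are not files shipped with the project). Each line is
// "<key> [limit-bytes]", separated by tabs or spaces, and a '*' key opens
// registration to everyone:
//
//   alloweddomains.txt            allowedemails.txt
//   ------------------            -----------------
//   example.com  33554432         alice@example.org  67108864
//   partner.org                   bob@example.org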
try {
  // Helper to parse TSV files for quotas
  const parseQuotaFile = (content) => {
    const map = new Map();
    content.split('\n').forEach(line => {
      const parts = line.trim().toLowerCase().split(/\s+/); // Split by whitespace (tab or space)
      if (parts[0]) {
        // The first part is always the email/domain
        const key = parts[0];
        // The second part is the limit, if it exists and is a valid number
        const limit = parts.length > 1 ? parseInt(parts[1], 10) : null;
        if (limit !== null && !Number.isNaN(limit)) {
          map.set(key, limit);
        }
      }
    });
    return map;
  };

  const globalLimitPath = path.join(SETTINGS_DIR, 'globallimit.txt');
  if (fs.existsSync(globalLimitPath)) {
    const limitFromFile = parseInt(fs.readFileSync(globalLimitPath, 'utf8').trim(), 10);
    if (!isNaN(limitFromFile)) {
      GLOBAL_LIMIT = limitFromFile;
    }
  }
  console.log(`Global storage quota set to: ${GLOBAL_LIMIT} bytes.`);

  const domainsPath = path.join(SETTINGS_DIR, 'alloweddomains.txt');
  const emailsPath = path.join(SETTINGS_DIR, 'allowedemails.txt');
  const domainsContent = fs.readFileSync(domainsPath, 'utf8').trim().toLowerCase();
  const emailsContent = fs.readFileSync(emailsPath, 'utf8').trim().toLowerCase();

  EMAIL_LIMITS = parseQuotaFile(emailsContent);
  DOMAIN_LIMITS = parseQuotaFile(domainsContent);
  console.log(`Loaded ${EMAIL_LIMITS.size} email-specific quotas and ${DOMAIN_LIMITS.size} domain-specific quotas.`);

  const domainsForRegistration = domainsContent.split('\n').map(l => l.trim().split(/\s+/)[0]);
  const emailsForRegistration = emailsContent.split('\n').map(l => l.trim().split(/\s+/)[0]);

  if (domainsForRegistration.includes('*') || emailsForRegistration.includes('*')) {
    ALLOW_ALL_REGISTRATIONS = true;
    console.log('Registration policy: ALLOW ALL EMAILS.');
  } else {
    ALLOWED_DOMAINS = new Set(domainsForRegistration.filter(d => d));
    ALLOWED_EMAILS = new Set(emailsForRegistration.filter(e => e));
    console.log(`Registration policy: Restricted to ${ALLOWED_DOMAINS.size} domains and ${ALLOWED_EMAILS.size} specific emails.`);
  }
} catch (error) {
  console.error(`CRITICAL ERROR: Could not read alloweddomains.txt or allowedemails.txt. ${error.message}`);
  console.error('Both files must exist in the settings directory for the server to start. Exiting.');
  process.exit(1);
}
// =================================================================================================
// SECTION 2: EXPRESS APP FACTORY
// =================================================================================================
function createExpressApp(dbConnection, isTestServer, otherDbContext = null) {
  const app = express();
  app.set('trust proxy', 1);
  app.use(serverTimingMiddleware);

  app.use((req, res, next) => {
    res.setHeader('Access-Control-Allow-Origin', `https://${SERVER_DOMAIN}`);
    res.setHeader('Access-Control-Allow-Methods', 'GET, POST, PUT, DELETE, OPTIONS');
    res.setHeader('Access-Control-Allow-Headers', 'Content-Type, Accept, Origin, Authorization, X-User-Email, X-Server-Key, X-Session-ID, X-Query-String, X-Compress-Payload, X-Encrypt, X-Encrypt-Payload, X-Expires-In-Seconds, X-Metadata, X-Payload-Metadata, X-Tags, X-Recipient-Emails, X-Shared-Domain, X-Shared-Field, X-Access-Control, X-Admin-Emails, X-Authorized-Emails, X-Admin-Email-Hashes, X-Authorized-Email-Hashes');
    res.setHeader('Access-Control-Expose-Headers', 'Content-Type, Content-Disposition, Content-Encoding, Content-Length, ETag, X-ID, X-Server-Time, X-Encrypted, X-Compressed, X-Expires-At, X-Message-Metadata, X-Payload-Metadata, X-Owner-Is-Site-Owner, X-User-Is-Admin, X-Sender-Email, X-Signer-Email, X-Admin-Emails, X-Admin-Hashes, X-Authorized-Emails, X-Authorized-Hashes, X-Validation-Status-Metadata, X-Validation-Signer-Metadata, X-Validation-Status-Full, X-Validation-Signer-Full, X-Storage-Quota, X-Storage-Usage');
    res.setHeader('Access-Control-Allow-Credentials', 'true');
    if (req.method === 'OPTIONS') { return res.status(204).end(); }
    next();
  });

  // --- MIDDLEWARE ORDER ---
  app.use(cookieParser());
  app.use((req, res, next) => {
    req.db = dbConnection;
    req.db.allowDrop = isTestServer;
    if (otherDbContext) { req.otherDb = otherDbContext; }
    req.pepper = PEPPER;
    req.siteOwnerEmail = siteOwnerEmail;
    req.turnSecret = TURN_SECRET;
    req.storePlaintextEmails = STORE_PLAINTEXT_EMAILS;
    req.enableLegacyValidation = ENABLE_LEGACY_VALIDATION;
    req.legacySessionsFile = LEGACY_SESSIONS_FILE;
    req.allowAllRegistrations = ALLOW_ALL_REGISTRATIONS;
    req.allowedDomains = ALLOWED_DOMAINS;
    req.allowedEmails = ALLOWED_EMAILS;
    req.globalLimit = GLOBAL_LIMIT;
    req.domainLimits = DOMAIN_LIMITS;
    req.emailLimits = EMAIL_LIMITS;
    next();
  });
  app.use(authenticateOptional);
  app.use(addStorageHeaders);

  // Apply the three-tier rate limiting strategy.
  // We apply limiters from most specific to most general.

  // --- TIER 1: HIGH-VOLUME LIMITER (200,000/min) ---
  const highVolumeLimiter = rateLimit({
    windowMs: 60 * 1000,
    max: 200000,
    message: { error: 'Too many high-volume requests from this IP address.' }
  });
  // Apply to ID-based payload GETs
  app.get('/api/userdata/:id/payload', highVolumeLimiter);
  app.get('/api/shareddata/:id/payload', highVolumeLimiter);
  app.get('/api/mail/:id', highVolumeLimiter);
  // Apply to chunk/file POSTs
  app.post('/api/userdata', highVolumeLimiter);
  app.post('/api/shareddata', highVolumeLimiter);
  app.post('/api/mail', highVolumeLimiter);

  // --- TIER 2: SENSITIVE LIMITER (50/min) ---
  const sensitiveApiLimiter = rateLimit({
    windowMs: 60 * 1000,
    max: 50,
    message: { error: 'Too many sensitive requests from this IP address.' }
  });
  app.use('/api/auth/sessions', sensitiveApiLimiter);
  app.use('/api/auth/request-registration', sensitiveApiLimiter);
  app.use('/api/auth/complete-registration', sensitiveApiLimiter);
  app.use('/api/auth/request-password-reset', sensitiveApiLimiter);
  app.use('/api/auth/perform-password-reset', sensitiveApiLimiter);
  app.use('/api/auth/change-password', sensitiveApiLimiter);
  app.use('/api/keys/lookup', sensitiveApiLimiter);

  // --- TIER 3: GENERAL API LIMITER (500/min) ---
  const highVolumePostRegex = /^\/api\/(userdata|shareddata|mail)\/?$/;
  const highVolumeGetRegex = /^\/api\/(userdata|shareddata|mail)\/[^/]+$/;
  const sensitivePaths = new Set([
    '/api/auth/sessions',
    '/api/auth/request-registration',
    '/api/auth/complete-registration',
    '/api/auth/request-password-reset',
    '/api/auth/perform-password-reset',
    '/api/auth/change-password',
    '/api/keys/lookup'
  ]);
  const generalApiLimiter = rateLimit({
    windowMs: 60 * 1000,
    // Matches the 500/min tier description above; the previous value (200,000)
    // duplicated Tier 1 and made this tier a no-op.
    max: 500,
    message: { error: 'Too many general API requests from this IP address.' },
    skip: (req) => {
      // req.path is relative to the '/api' mount here, so re-prefix it before
      // testing the absolute-path regexes and the sensitivePaths set.
      const fullPath = `/api${req.path}`;
      // Skip if it's a high-volume POST request
      if (req.method === 'POST' && highVolumePostRegex.test(fullPath)) { return true; }
      // Skip if it's a high-volume GET payload request.
      // Note: /mail/:id doesn't have /payload, so we check it differently.
      if (req.method === 'GET' && (fullPath.endsWith('/payload') || highVolumeGetRegex.test(fullPath))) { return true; }
      // Skip if it's a sensitive path
      if (sensitivePaths.has(fullPath)) { return true; }
      return false;
    }
  });
  // Apply the general limiter to the entire API. The skip function ensures
  // it doesn't double-count requests handled by the other limiters.
  app.use('/api', generalApiLimiter);
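  // Informal sketch of how sample requests route through the three tiers,
  // assuming the limiter configuration as written above (illustrative only):
  //   POST   /api/userdata          -> highVolumeLimiter   (generalApiLimiter skips it)
  //   GET    /api/mail/64ab...      -> highVolumeLimiter   (matches highVolumeGetRegex)
  //   POST   /api/auth/sessions     -> sensitiveApiLimiter (in sensitivePaths, so skipped by general)
  //   GET    /api/keys/lookup       -> sensitiveApiLimiter
  //   DELETE /api/userdata/64ab...  -> generalApiLimiter only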
  // Mount the main API router. All requests will have passed through the limiters.
  app.use('/api', apiRouter);

  return app;
}

// =================================================================================================
// SECTION 3: SERVER STARTUP
// =================================================================================================
(async () => {
  try {
    await emailService.init();
    await sodium.ready;
    console.log('Libsodium cryptographic library initialized successfully.');

    // 1. Connect to BOTH the production and testing databases.
    console.log('Connecting to databases...');
    const [prodDbContext, testDbContext] = await Promise.all([
      connectDB('production'),
      connectDB('testing')
    ]);
    console.log('Database contexts established for both environments.');

    // 2. Create the special "hybrid" context for the test server.
    //    It uses PRODUCTION for auth models but TESTING for data models.
    const hybridTestDbContext = {
      conn: testDbContext.conn,
      gfs: testDbContext.gfs,
      allowDrop: true, // Flag it as the test context
      models: {
        // Auth models point to PRODUCTION
        UserKey: prodDbContext.models.UserKey,
        PendingValidation: prodDbContext.models.PendingValidation,
        // Data models point to TESTING
        UserData: testDbContext.models.UserData,
        SharedData: testDbContext.models.SharedData,
        Mail: testDbContext.models.Mail,
        ServerMaintenance: testDbContext.models.ServerMaintenance,
      }
    };
    console.log('Hybrid database context for test server created.');

    // 3. Create both Express apps with their correct contexts.
    const prodApp = createExpressApp(prodDbContext, false, testDbContext);
    const testApp = createExpressApp(hybridTestDbContext, true, prodDbContext);
    console.log('Production and Test Express apps created.');

    // 4. Create and start both HTTPS servers.
    const prodServer = https.createServer(TLS_OPTIONS, prodApp);
    const testServer = https.createServer(TLS_OPTIONS, testApp);
    prodServer.listen(PROD_PORT, () => {
      console.log(`\u2705 Production server is running on port ${PROD_PORT}`);
    });
    testServer.listen(TEST_PORT, () => {
      console.log(`\u2705 Testing server is running on port ${TEST_PORT}`);
    });

    // 5. Schedule periodic maintenance tasks for both databases.
    console.log('Scheduling periodic GridFS cleanup tasks...');
    setInterval(() => maintenanceService.cleanOrphanedGridFSFiles(prodDbContext), 24 * 60 * 60 * 1000);
    setInterval(() => maintenanceService.cleanOrphanedGridFSFiles(testDbContext), 24 * 60 * 60 * 1000);

    // --- QUOTA: START ---
    // 6. Run the one-time bootstrap for storage quotas.
    //    This can be removed after it has run once on the production server.
    await maintenanceService.bootstrapStorageQuotas(prodDbContext, testDbContext);

    // 7. Schedule the daily reconciliation for test storage.
    console.log('Scheduling daily reconciliation of test storage usage...');
    setInterval(() => maintenanceService.recalculateTestStorageUsage(prodDbContext, testDbContext), 24 * 60 * 60 * 1000);
    // --- QUOTA: END ---
  } catch (err) {
    console.error('\u274c Fatal server startup error:', err);
    process.exit(1);
  }
})();
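// For reference, an illustrative settings/ directory assembled from the reads in
// SECTION 1 (the file names are real; every value here is made up):
//
//   settings/
//     port.txt                 -> 8443
//     serverdomain.txt         -> example.com
//     pepper.txt               -> <secret string>
//     turnsecret.txt           -> <secret string>
//     siteowneremail.txt       -> owner@example.com
//     alloweddomains.txt       -> example.com 33554432
//     allowedemails.txt        -> *
//     globallimit.txt          -> 16777216
//     privkey.pem / fullchain.pem
//   plus the optional flags: storeplaintextemails.txt, enablelegacyvalidation.txt,
//   and the legacy sessions.txt fallback.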
=================================================================================
FILE: src/utils/api.utils.js
=================================================================================
const zstd = require('@mongodb-js/zstd');
const zlib = require('zlib');

/**
 * A standardized error handler for API controllers.
 * @param {object} res The Express response object.
 * @param {Error} error The error object.
 */
function handleApiError(res, error) {
  console.error('API Error:', error.message, `\n${error.stack || '(no stack trace)'}`);
  res.status(error.statusCode || 500).json({ error: error.message || 'An internal server error occurred.' });
}

/**
 * Compresses and sends a JSON response, respecting the client's Accept-Encoding header.
 * @param {object} req The Express request object.
 * @param {object} res The Express response object.
 * @param {object} data The JavaScript object to be stringified and sent.
 */
async function sendCompressedJson(req, res, data) {
  // Ported unchanged from the original server.js.
  const jsonString = JSON.stringify(data);
  const acceptEncoding = req.headers['accept-encoding'] || '';
  res.setHeader('Content-Type', 'application/json');
  try {
    if (acceptEncoding.includes('zstd')) {
      const compressedPayload = await zstd.compress(Buffer.from(jsonString, 'utf-8'));
      res.setHeader('Content-Encoding', 'zstd');
      res.status(200).send(compressedPayload);
    } else if (acceptEncoding.includes('gzip')) {
      const compressedPayload = zlib.gzipSync(jsonString);
      res.setHeader('Content-Encoding', 'gzip');
      res.status(200).send(compressedPayload);
    } else {
      res.status(200).send(jsonString);
    }
  } catch (compressionError) {
    console.error('Failed to compress JSON response:', compressionError);
    res.status(200).send(jsonString);
  }
}

/**
 * Stringifies a JSON object with its keys sorted alphabetically.
 * This ensures a consistent, deterministic output for cryptographic signing.
 * NOTE: This is a shallow sort. For deeply nested objects, a more complex
 * recursive function would be needed. For the flat payloadMetadata, this is perfect.
 * @param {object} obj The object to stringify.
 * @returns {string} The canonically stringified JSON.
 */
function canonicalStringify(obj) {
  const allKeys = Object.keys(obj).sort();
  const newObj = {};
  for (const key of allKeys) {
    newObj[key] = obj[key];
  }
  return JSON.stringify(newObj);
}

module.exports = { handleApiError, sendCompressedJson, canonicalStringify };
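// Quick sanity check for canonicalStringify (illustrative, not executed here):
// key order no longer affects the output bytes, which is what makes the result
// safe to sign and later re-verify.
//   canonicalStringify({ b: 1, a: 2 })  // -> '{"a":2,"b":1}'
//   canonicalStringify({ a: 2, b: 1 })  // -> '{"a":2,"b":1}'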
=================================================================================
FILE: src/config/database.js
=================================================================================
const mongoose = require('mongoose');
const { GridFSBucket } = require('mongodb');

// Import all schemas
const UserKeySchema = require('../api/models/UserKey');
const UserDataSchema = require('../api/models/UserData');
const SharedDataSchema = require('../api/models/SharedData');
const MailSchema = require('../api/models/Mail');
const ServerMaintenanceSchema = require('../api/models/ServerMaintenance');
const PendingValidationSchema = require('../api/models/PendingValidation');

// Define connection strings, as in original server.js
const PROD_MONGODB_URI = `mongodb://127.0.0.1:27017/haskellaijs?replicaSet=rs0`;
const TEST_MONGODB_URI = `mongodb://127.0.0.1:27017/haskellaijs_testing2?replicaSet=rs0`;

const connectDB = async (environment) => {
  const isProduction = environment === 'production';
  const uri = isProduction ? PROD_MONGODB_URI : TEST_MONGODB_URI;
  try {
    const conn = await mongoose.createConnection(uri).asPromise();
    console.log(`Successfully connected to MongoDB: ${conn.name}`);

    // Compile models from schemas on the specific connection
    const models = {
      UserKey: conn.model('UserKey', UserKeySchema),
      UserData: conn.model('UserData', UserDataSchema),
      SharedData: conn.model('SharedData', SharedDataSchema),
      Mail: conn.model('Mail', MailSchema),
      ServerMaintenance: conn.model('ServerMaintenance', ServerMaintenanceSchema),
      PendingValidation: conn.model('PendingValidation', PendingValidationSchema)
    };

    // Create GridFSBucket instance
    const gfs = new GridFSBucket(conn.db, { bucketName: 'uploads' });

    // Return the complete DB context object, which will be attached to req.db
    return { conn, models, gfs };
  } catch (err) {
    console.error(`MongoDB connection error for ${environment}:`, err);
    throw err; // Re-throw error to be caught by the main startup function
  }
};

module.exports = connectDB;

=================================================================================
FILE: src/api/middleware/authenticateOptional.js
=================================================================================
const cryptoService = require('../../services/crypto.service');

/**
 * Express middleware that OPTIONALLY authenticates a user.
 * If a valid session cookie exists, it populates req.userKeys and req.user.
 * If not, it simply calls next() without error, allowing for public access.
 */
const authenticateOptional = async (req, res, next) => {
  try {
    // Step 1: Get BOTH cookies. If either is missing, proceed without auth.
    const { xsessionkey, email } = req.cookies;
    if (!xsessionkey || !email) { return next(); }
    const [sessionId, sessionSecret] = xsessionkey.split('-');
    if (!sessionId || !sessionSecret) {
      // Invalid cookie format, proceed without auth.
      return next();
    }
    const { UserKey } = req.db.models;
    const { pepper } = req;

    // Step 2: Hash the email from the cookie for the indexed lookup.
    const emailHash = cryptoService.simpleHash(email, pepper);

    // Step 3: Perform the efficient, indexed query.
    const user = await UserKey.findOne({
      ownerSimpleHash: emailHash,
      'sessions.sessionId': sessionId,
      status: 'active'
    });
    // If no user is found, proceed without auth.
    if (!user) { return next(); }

    // Step 4: Verify the session and decrypt the keys.
    const session = user.sessions.find(s => s.sessionId === sessionId);
    if (!session) { return next(); }
    const sessionKeyBuffer = Buffer.from(sessionSecret, 'hex');
    const xserverkey = cryptoService.decryptSecretKey(session.encryptedXServerKey, sessionKeyBuffer);
    if (!xserverkey) { return next(); }
    const userKeys = cryptoService.decryptUserKeys(user, xserverkey);
    if (!userKeys) { return next(); }

    // Step 5: SUCCESS! Authentication context is valid. Attach it to the request.
    req.plaintextEmail = email;  // The verified plaintext email
    req.userKeys = userKeys;     // The decrypted cryptographic keys
    req.user = user;             // The full Mongoose user document
    req.sessionId = sessionId;   // The specific session ID being used

    // Proceed to the next middleware/controller with the populated request object.
    next();
  } catch (error) {
    // If any unexpected error occurs, log it for debugging but do not block the request.
    console.error('Optional Authentication Middleware Error:', error);
    next();
  }
};

module.exports = authenticateOptional;
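// Cookie format accepted above, with illustrative values (see createSession in
// auth.controller.js for where these are minted):
//   xsessionkey=9f86d081884c7d659a2feaa0c55ad015-<64 hex chars of session secret>
//   email=alice@example.com
// The sessionId half locates the stored session; the sessionSecret half decrypts
// the stored xserverkey, so the server never persists a usable key at rest.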
=================================================================================
FILE: src/api/middleware/rateLimiters.js
=================================================================================
const rateLimit = require('express-rate-limit');

const banHammerLimiter = rateLimit({
  windowMs: 60 * 60 * 1000,
  max: 1,
  message: { error: 'Suspicious activity detected. This IP has been temporarily blocked for one hour.' },
  keyGenerator: (req) => req.ip,
  standardHeaders: false,
});

const failedLoginLimiter = rateLimit({
  windowMs: 5 * 60 * 1000,
  max: 5,
  standardHeaders: true,
  legacyHeaders: false,
  message: { error: 'Too many failed authentication attempts from this IP. Please try again in 5 minutes.' },
  keyGenerator: (req) => req.ip,
  // --- IMPROVEMENT: Only count failed (401) responses towards the limit ---
  // This is more robust than manually incrementing in the controller. Note that
  // a request-time `skip` callback cannot do this: it runs before the route
  // handler, when res.statusCode is still the default 200, so the original
  // `skip: (req, res) => res.statusCode !== 401` skipped every request. The
  // post-response hooks below are express-rate-limit's supported mechanism.
  skipSuccessfulRequests: true,
  requestWasSuccessful: (req, res) => res.statusCode !== 401,
  // --- END IMPROVEMENT ---
  handler: (req, res, next, options) => {
    console.warn(`Blocked suspicious login activity from IP: ${req.ip} for user: ${req.body.email || 'N/A'}`);
    banHammerLimiter(req, res, () => {
      res.status(options.statusCode).send(options.message);
    });
  }
});

const sensitiveActionLimiter = rateLimit({
  windowMs: 15 * 60 * 1000,
  max: 10,
  standardHeaders: true,
  legacyHeaders: false,
  message: { error: 'Too many requests of this type from this IP. Please try again later.' }
});

module.exports = { failedLoginLimiter, sensitiveActionLimiter };
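// Usage sketch (illustrative; the real wiring lives in src/api/routes, which is
// not shown in this file, so the route path here is an assumption):
//   const { failedLoginLimiter } = require('./middleware/rateLimiters');
//   router.post('/auth/sessions', failedLoginLimiter, authController.createSession);
// A successful login never increments the counter; five 401s within five minutes
// trip the limiter, and its handler then hands the IP to banHammerLimiter.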
=================================================================================
FILE: src/api/middleware/addStorageHeaders.js
=================================================================================
/**
 * Express middleware that intercepts outgoing responses to add storage quota headers.
 * If a user is authenticated on the request, it re-fetches their latest storage
 * usage and quota limit from the database and attaches them as response headers.
 * This ensures the client always receives the most up-to-date information,
 * reflecting any changes made during the current request.
 */
const addStorageHeaders = (req, res, next) => {
  // Store the original res.send and res.end methods
  const originalSend = res.send;
  const originalEnd = res.end;

  // A helper function to perform the logic. We'll call this from our wrappers.
  const addHeaders = async () => {
    // 1. Check if the user was authenticated. The `authenticateOptional` middleware
    //    populates `req.user` if a valid session exists.
    if (req.user && !res.headersSent) {
      try {
        // 2. Re-fetch the user document from the database. This is crucial because
        //    `req.user` is a snapshot from the START of the request. If the user
        //    uploaded or deleted a file, its values will be stale. This new query
        //    gets the absolute latest values *after* the controller logic has run.
        const { UserKey } = req.db.models;
        const freshUser = await UserKey.findById(req.user.id).lean();
        if (freshUser) {
          // 3. Calculate total usage from both prod and test stores.
          const usage = (freshUser.storageUsedProd || 0) + (freshUser.storageUsedTest || 0);
          const quota = freshUser.quotaLimit;
          // 4. Set the headers.
          res.setHeader('X-Storage-Usage', usage.toString());
          res.setHeader('X-Storage-Quota', quota.toString());
        }
      } catch (error) {
        // If anything goes wrong, log it but don't crash the request.
        // The user will simply not get the headers on this response.
        console.error('Error adding storage headers:', error);
      }
    }
  };

  // Wrap res.send. Caveat: the wrapper is async, so res.send now returns a
  // Promise rather than `res`; callers that chain on send()'s return value
  // would break, though nothing in this codebase does.
  res.send = async function (body) {
    await addHeaders();
    // Call the original function to complete the response
    return originalSend.call(this, body);
  };

  // Wrap res.end (for responses sent without a body, e.g., res.status(204).end())
  res.end = async function (...args) {
    await addHeaders();
    return originalEnd.apply(this, args);
  };

  // Continue to the next middleware in the chain
  next();
};

module.exports = addStorageHeaders;

=================================================================================
FILE: src/api/middleware/serverTiming.js
=================================================================================
// This middleware calculates the total time a request takes to process on the server
// and adds it to the response as an 'X-Server-Time' header. It's a direct
// port from the original server.js.
const serverTimingMiddleware = (req, res, next) => {
  const start = performance.now();
  // The original res.send function is stored.
  const originalSend = res.send;
  // We wrap res.send with our own function.
  res.send = function (body) {
    // Only set the header if it hasn't been sent already.
    if (!res.headersSent) {
      const totalTime = performance.now() - start;
      res.setHeader('X-Server-Time', `${totalTime.toFixed(2)}ms`);
    }
    // Call the original res.send to complete the response.
    return originalSend.call(this, body);
  };
  next();
};

module.exports = serverTimingMiddleware;

=================================================================================
FILE: src/api/middleware/noCache.js
=================================================================================
// This middleware sets the 'Cache-Control' header to 'no-store', instructing
// browsers and proxies not to cache the response. This is critical for dynamic
// API endpoints where the data can change frequently.
const noCache = (req, res, next) => {
  res.setHeader('Cache-Control', 'no-store');
  next();
};

module.exports = noCache;

=================================================================================
FILE: src/api/middleware/authenticate.js
=================================================================================
const authenticate = async (req, res, next) => {
  // The global authenticateOptional middleware has already run on this request.
  // We just need to check if it was successful.
  if (req.userKeys) {
    // Success! The user is authenticated. Proceed to the controller.
    return next();
  } else {
    // Failure. The user is not authenticated, but this route requires it.
    // Send a 401 Unauthorized response and stop the request chain.
    return res.status(401).json({ error: 'Authentication required.' });
  }
};

module.exports = authenticate;

=================================================================================
FILE: src/api/middleware/decodeHeaders.js
=================================================================================
const decodeBase64Headers = (req, res, next) => {
  for (const key in req.headers) {
    const value = req.headers[key];
    if (typeof value === 'string' && value.startsWith('B64:')) {
      try {
        const base64Data = value.substring(4);
        const decodedValue = Buffer.from(base64Data, 'base64').toString('utf-8');
        req.headers[key] = decodedValue;
      } catch (e) {
        console.warn(`Failed to decode B64 header '${key}'. Leaving as is. Error: ${e.message}`);
      }
    }
  }
  next();
};

module.exports = decodeBase64Headers;
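// Example (illustrative): a client that needs a non-ASCII or JSON header value
// can base64-encode it and send
//   X-Tags: B64:eyJrIjoidiJ9
// and this middleware rewrites the header in place to
//   X-Tags: {"k":"v"}
// before any controller reads it.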
=================================================================================
FILE: src/api/controllers/auth.controller.js
=================================================================================
const crypto = require('crypto');
const fs = require('fs/promises');
const cryptoService = require('../../services/crypto.service');
const tokenService = require('../../services/token.service');
const emailService = require('../../services/email.service');
const sodium = require('libsodium-wrappers');
const { handleApiError } = require('../../utils/api.utils');
const quotaService = require('../../services/quota.service');

async function cleanupUserData(dbContext, emailHash, session) {
  if (!dbContext) return; // Safety check for environments where there's no "otherDb"
  console.log(`[Password Reset Cleanup] Starting cleanup for hash ${emailHash} on DB: ${dbContext.conn.name}`);
  const { UserData, Mail, SharedData } = dbContext.models;
  const { gfs } = dbContext;

  // Clean UserData (only encrypted data is unrecoverable and must be deleted)
  const userDataToDelete = await UserData.find({ ownerSimpleHash: emailHash, isEncrypted: true }).session(session).lean();
  if (userDataToDelete.length > 0) {
    const payloadIds = userDataToDelete.map(doc => doc.payload).filter(id => id);
    if (payloadIds.length > 0) {
      for (const id of payloadIds) {
        const fileExists = await gfs.find({ _id: id }, { session }).limit(1).toArray();
        if (fileExists.length > 0) await gfs.delete(id, { session });
      }
    }
    await UserData.deleteMany({ _id: { $in: userDataToDelete.map(d => d._id) } }).session(session);
    console.log(`[${dbContext.conn.name}] Cleaned ${userDataToDelete.length} encrypted UserData documents.`);
  }

  // Clean Mail: remove as recipient, then delete orphaned mail
  await Mail.updateMany(
    { recipientEmailHashes: emailHash },
    { $pull: { recipientEmailHashes: emailHash }, $unset: { [`wrappedKeys.${emailHash}`]: '' } }
  ).session(session);
  const orphanedMails = await Mail.find({ recipientEmailHashes: { $size: 0 } }).session(session).lean();
  if (orphanedMails.length > 0) {
    const payloadIds = orphanedMails.map(doc => doc.payload).filter(id => id);
    if (payloadIds.length > 0) {
      for (const id of payloadIds) {
        const fileExists = await gfs.find({ _id: id }, { session }).limit(1).toArray();
        if (fileExists.length > 0) await gfs.delete(id, { session });
      }
    }
    await Mail.deleteMany({ _id: { $in: orphanedMails.map(d => d._id) } }).session(session);
    console.log(`[${dbContext.conn.name}] Cleaned ${orphanedMails.length} orphaned Mail documents.`);
  }

  // Clean SharedData: remove permissions, then delete documents where user was the last admin
  const sharedDocsToModify = await SharedData.find({
    $or: [{ adminEmailHashes: emailHash }, { authorizedEmailHashes: emailHash }]
  }).session(session);
  const docsToDelete = [];
  for (const doc of sharedDocsToModify) {
    doc.adminEmailHashes.pull(emailHash);
    doc.authorizedEmailHashes.pull(emailHash);
    if (doc.adminEmailHashes.length === 0) {
      docsToDelete.push(doc);
    } else {
      await doc.save({ session });
    }
  }
  if (docsToDelete.length > 0) {
    const payloadIds = docsToDelete.map(doc => doc.payload).filter(id => id);
    if (payloadIds.length > 0) {
      for (const id of payloadIds) {
        const fileExists = await gfs.find({ _id: id }, { session }).limit(1).toArray();
        if (fileExists.length > 0) await gfs.delete(id, { session });
      }
    }
    await SharedData.deleteMany({ _id: { $in: docsToDelete.map(d => d._id) } }).session(session);
    console.log(`[${dbContext.conn.name}] Cleaned ${docsToDelete.length} orphaned SharedData documents.`);
  }
  console.log(`[Password Reset Cleanup] Finished cleanup on DB: ${dbContext.conn.name}`);
}
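// Token flow shared by the registration and password-reset handlers below
// (an informal sketch of the code as written, not additional behavior):
//   1. request*: tokenService.generateToken() -> email the raw token to the user,
//      store only its tokenHash in PendingValidation with a 15-minute expiry.
//   2. complete*/perform*: tokenService.hashToken(submitted token) -> look up the
//      PendingValidation record, reject if missing, wrong type, or expired, then
//      mint the user's keypairs and delete the record.
// Storing only the hash presumably ensures a leaked database does not contain
// usable registration or reset tokens.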
exports.requestRegistration = async (req, res) => {
  try {
    const email = req.body.email ? req.body.email.toLowerCase() : null;
    if (!email) { throw { statusCode: 400, message: 'Email is required.' }; }

    // --- NEW CODE STARTS HERE ---
    if (!req.allowAllRegistrations) {
      const domain = email.substring(email.lastIndexOf("@") + 1);
      const isAllowed = req.allowedEmails.has(email) || req.allowedDomains.has(domain);
      if (!isAllowed) {
        console.log(`Registration blocked for non-allowlisted email: ${email}`);
        // Return the generic message to prevent leaking the registration policy.
        return res.status(200).json({ message: 'If an account with this email does not exist, a registration link will be sent.' });
      }
    }
    // --- NEW CODE ENDS HERE ---

    const { UserKey, PendingValidation } = req.db.models;
    const { pepper } = req;
    const existingUser = await UserKey.findOne({ ownerSimpleHash: cryptoService.simpleHash(email, pepper) });
    if (existingUser) {
      return res.status(200).json({ message: 'If an account with this email does not exist, a registration link will be sent.' });
    }
    const { token, tokenHash } = tokenService.generateToken();
    const expiresAt = new Date(Date.now() + 15 * 60 * 1000);
    await PendingValidation.create({ tokenHash, email, type: 'registration', expiresAt });
    await emailService.sendRegistrationEmail(email, token);
    res.status(200).json({ message: 'If an account with this email does not exist, a registration link will be sent.' });
  } catch (error) {
    handleApiError(res, error);
  }
};

exports.completeRegistration = async (req, res) => {
  try {
    const { token, xserverkey } = req.body;
    if (!token || !xserverkey) { throw { statusCode: 400, message: 'Token and xserverkey are required.' }; }
    if (!/^[a-f0-9]{64}$/.test(xserverkey)) { throw { statusCode: 400, message: 'xserverkey must be a 64-character hex string.' }; }
    const { UserKey, PendingValidation } = req.db.models;
    const { pepper } = req;
    const tokenHash = tokenService.hashToken(token);
    const pending = await PendingValidation.findOne({ tokenHash: tokenHash });
    if (!pending || pending.type !== 'registration' || new Date() > pending.expiresAt) {
      throw { statusCode: 400, message: 'Invalid or expired registration token.' };
    }
    const boxKeyPair = sodium.crypto_box_keypair();
    const signKeyPair = sodium.crypto_sign_keypair();
    const serverKeyBuffer = Buffer.from(xserverkey, 'hex');
    // Resolve the quota limit for the new user based on current rules.
    const userQuotaLimit = quotaService.resolveUserQuota(pending.email, req);
    await UserKey.create({
      ownerSimpleHash: cryptoService.simpleHash(pending.email, pepper),
      storageUsedProd: 0,
      storageUsedTest: 0,
      quotaLimit: userQuotaLimit,
      publicKey: sodium.to_base64(boxKeyPair.publicKey),
      signingPublicKey: sodium.to_base64(signKeyPair.publicKey),
      encryptedBoxSecretKey: cryptoService.encryptSecretKey(Buffer.from(boxKeyPair.privateKey), serverKeyBuffer),
      encryptedSignSecretKey: cryptoService.encryptSecretKey(Buffer.from(signKeyPair.privateKey), serverKeyBuffer),
      status: 'active',
      sessions: []
    });
    await pending.deleteOne();
    res.status(201).json({ success: true, message: 'Account created successfully. You can now log in.' });
  } catch (error) {
    handleApiError(res, error);
  }
};

exports.createSession = async (req, res) => {
  try {
    const email = req.body.email ?
req.body.email.toLowerCase() : null; const { xserverkey } = req.body; if (!email || !xserverkey) { throw { statusCode: 400, message: 'Email and xserverkey are required.' }; } if (!/^[a-f0-9]{64}$/.test(xserverkey)) { throw { statusCode: 400, message: 'xserverkey must be a 64-character hex string.' }; } const { UserKey } = req.db.models; const { pepper } = req; const emailHash = cryptoService.simpleHash(email, pepper); let user = await UserKey.findOne({ ownerSimpleHash: emailHash, status: 'active' }); if (!user && req.enableLegacyValidation) { user = await validateAndMigrateLegacyUser(req, email, emailHash, xserverkey, UserKey); } if (!user) { throw { statusCode: 401, message: 'Authentication failed. Invalid credentials.' }; } // This is the primary sync point. On every login, we re-calculate the user's // quota from the latest rules and update their record if it has changed. const currentQuota = quotaService.resolveUserQuota(email, req); if (user.quotaLimit !== currentQuota) { user.quotaLimit = currentQuota; // The save will happen below, no need for a separate DB call. console.log(`Quota for user ${email} updated to ${currentQuota} bytes upon login.`); } const serverKeyBuffer = Buffer.from(xserverkey, 'hex'); const decryptedKey = cryptoService.decryptSecretKey(user.encryptedBoxSecretKey, serverKeyBuffer); if (!decryptedKey) { throw { statusCode: 401, message: 'Authentication failed. Invalid credentials.' }; } const sessionId = crypto.randomBytes(16).toString('hex'); const sessionSecret = sodium.to_hex(sodium.randombytes_buf(32)); const sessionKeyBuffer = Buffer.from(sessionSecret, 'hex'); const encryptedXServerKey = cryptoService.encryptSecretKey(serverKeyBuffer, sessionKeyBuffer); user.sessions.push({ sessionId, encryptedXServerKey }); const MAX_SESSIONS = 5; if (user.sessions.length > MAX_SESSIONS) { user.sessions.splice(0, user.sessions.length - MAX_SESSIONS); } if (req.storePlaintextEmails) { user.ownerEmail = email; } await user.save(); const cookieValue = `${sessionId}-${sessionSecret}`; res.cookie('xsessionkey', cookieValue, { httpOnly: true, secure: true, sameSite: 'Lax', maxAge: 10 * 365 * 24 * 60 * 60 * 1000 }); res.cookie('email', email, { httpOnly: true, secure: true, sameSite: 'Lax', maxAge: 10 * 365 * 24 * 60 * 60 * 1000 }); res.status(200).json({ success: true, message: 'Login successful.' }); } catch (error) { handleApiError(res, error); } }; exports.logout = async (req, res) => { try { const { user, sessionId } = req; user.sessions.pull({ sessionId: sessionId }); await user.save(); res.clearCookie('xsessionkey'); res.clearCookie('email'); res.status(200).json({ success: true, message: 'Logged out successfully.' }); } catch(error) { handleApiError(res, error); } }; exports.logoutAll = async (req, res) => { try { const { user } = req; user.sessions = []; await user.save(); res.clearCookie('xsessionkey'); res.clearCookie('email'); res.status(200).json({ success: true, message: 'Logged out from all devices successfully.' }); } catch(error) { handleApiError(res, error); } }; exports.requestPasswordReset = async (req, res) => { try { const email = req.body.email ? req.body.email.toLowerCase() : null; if (!email) { throw { statusCode: 400, message: 'Email is required.' 
}; } const { UserKey, PendingValidation } = req.db.models; const { pepper } = req; const user = await UserKey.findOne({ ownerSimpleHash: cryptoService.simpleHash(email, pepper), status: 'active' }); if (user) { const { token, tokenHash } = tokenService.generateToken(); const expiresAt = new Date(Date.now() + 15 * 60 * 1000); await PendingValidation.create({ tokenHash, email, type: 'password_reset', expiresAt }); await emailService.sendResetPasswordEmail(email, token); } res.status(200).json({ message: 'If an account with this email exists, a password reset link will be sent.' }); } catch(error) { handleApiError(res, error); } }; exports.performPasswordReset = async (req, res) => { // A session for the primary database context (the one native to the server) const primarySession = await req.db.conn.startSession(); // A session for the *other* database context, which we injected in app.js const otherSession = req.otherDb ? await req.otherDb.conn.startSession() : null; try { const { token, new_xserverkey } = req.body; if (!token || !new_xserverkey) { throw { statusCode: 400, message: 'Token and new_xserverkey are required.' }; } if (!/^[a-f0-9]{64}$/.test(new_xserverkey)) { throw { statusCode: 400, message: 'new_xserverkey must be a 64-character hex string.' }; } const { PendingValidation } = req.db.models; const { pepper } = req; const tokenHash = tokenService.hashToken(token); // --- Step 1: Validate the reset token first. --- const pending = await PendingValidation.findOne({ tokenHash: tokenHash }); if (!pending || pending.type !== 'password_reset' || new Date() > pending.expiresAt) { throw { statusCode: 400, message: 'Invalid or expired password reset token.' }; } const emailHash = cryptoService.simpleHash(pending.email, pepper); // --- Step 2: Clean data from the primary database context. --- await primarySession.withTransaction(async (session) => { await cleanupUserData(req.db, emailHash, session); }); // --- Step 3: Clean data from the *other* database context (if it exists). --- if (otherSession) { await otherSession.withTransaction(async (session) => { await cleanupUserData(req.otherDb, emailHash, session); }); } // --- Step 4: ONLY AFTER successful cleanup, perform the destructive key operations. --- // UserKey model always points to the production DB, even on the test server. const { UserKey } = req.db.models; await UserKey.updateMany({ ownerSimpleHash: emailHash }, { $set: { status: 'archived', sessions: [] } }); const boxKeyPair = sodium.crypto_box_keypair(); const signKeyPair = sodium.crypto_sign_keypair(); const serverKeyBuffer = Buffer.from(new_xserverkey, 'hex'); const userQuotaLimit = quotaService.resolveUserQuota(pending.email, req); await UserKey.create({ // This does not need a session as it's the final atomic step ownerSimpleHash: emailHash, storageUsedProd: 0, storageUsedTest: 0, quotaLimit: userQuotaLimit, publicKey: sodium.to_base64(boxKeyPair.publicKey), signingPublicKey: sodium.to_base64(signKeyPair.publicKey), encryptedBoxSecretKey: cryptoService.encryptSecretKey(Buffer.from(boxKeyPair.privateKey), serverKeyBuffer), encryptedSignSecretKey: cryptoService.encryptSecretKey(Buffer.from(signKeyPair.privateKey), serverKeyBuffer), status: 'active', sessions: [] }); await pending.deleteOne(); console.log(`[Password Reset] Keys for ${pending.email} have been successfully reset across all environments.`); res.status(200).json({ success: true, message: 'Password has been reset. You can now log in with your new password.' 
}); } catch (error) { if (primarySession.inTransaction()) await primarySession.abortTransaction(); if (otherSession && otherSession.inTransaction()) await otherSession.abortTransaction(); handleApiError(res, error); } finally { await primarySession.endSession(); if (otherSession) await otherSession.endSession(); } }; exports.changePassword = async (req, res) => { try { const { email, oldKeyHex, newKeyHex } = req.body; if (!email || !oldKeyHex || !newKeyHex) { throw { statusCode: 400, message: 'email, oldKeyHex, and newKeyHex are required.' }; } const { UserKey } = req.db.models; const { pepper } = req; const user = await UserKey.findOne({ ownerSimpleHash: cryptoService.simpleHash(email, pepper), status: 'active' }); if (!user) { throw { statusCode: 401, message: 'Authentication failed.' }; } const oldKeyBuffer = Buffer.from(oldKeyHex, 'hex'); const boxSecretKey = cryptoService.decryptSecretKey(user.encryptedBoxSecretKey, oldKeyBuffer); const signSecretKey = cryptoService.decryptSecretKey(user.encryptedSignSecretKey, oldKeyBuffer); if (!boxSecretKey || !signSecretKey) { throw { statusCode: 401, message: 'Authentication failed. Old password appears to be incorrect.' }; } const newKeyBuffer = Buffer.from(newKeyHex, 'hex'); user.encryptedBoxSecretKey = cryptoService.encryptSecretKey(boxSecretKey, newKeyBuffer); user.encryptedSignSecretKey = cryptoService.encryptSecretKey(signSecretKey, newKeyBuffer); user.sessions = []; await user.save(); res.clearCookie('xsessionkey'); res.clearCookie('email'); res.status(200).json({ success: true, message: 'Password updated successfully. All sessions have been logged out. Please log in again.' }); } catch (error) { handleApiError(res, error); } }; /** * Validates the session from the cookies and returns the user's email if valid. * Designed for simple script-based session checking (e.g., curl in bash). * Relies on the global `authenticateOptional` middleware. */ exports.validateSession = async (req, res) => { // The authenticateOptional middleware has already run. // If it was successful, req.plaintextEmail will be populated. if (req.plaintextEmail) { // Valid session: respond with the email and a 200 OK status. res.status(200).type('text/plain').send(req.plaintextEmail); } else { // Invalid session: respond with an empty string and a 200 OK status. res.status(200).type('text/plain').send(''); } }; /** * Handles legacy session validation and migrates a user to the new system if validation succeeds. * @param {object} req The Express request object, containing config and cookies. * @param {string} email The email address the user is attempting to log in with. * @param {string} emailHash The simpleHash of the email. * @param {string} xserverkey The new server key provided by the user. * @param {mongoose.Model} UserKey The UserKey Mongoose model. * @returns {Promise} The newly created user document or null if validation fails. */ async function validateAndMigrateLegacyUser(req, email, emailHash, xserverkey, UserKey) { console.log(`User ${email} not found. 
Attempting legacy validation fallback.`); const { sessionid: legacySessionId } = req.cookies; if (!legacySessionId) { return null; } const legacySessionHash = crypto.createHash('sha256').update(legacySessionId).digest('hex').substring(0, 32); try { const sessionsData = await fs.readFile(req.legacySessionsFile, 'utf8'); const lines = sessionsData.split('\n'); let legacyEmail = null; for (const line of lines) { const parts = line.split('\t'); if (parts.length >= 2 && parts[1].trim() === legacySessionHash) { legacyEmail = parts[0].trim(); break; } } if (legacyEmail && legacyEmail.toLowerCase() === email) { console.log(`Legacy session validated for ${email}. Migrating user to new system.`); const boxKeyPair = sodium.crypto_box_keypair(); const signKeyPair = sodium.crypto_sign_keypair(); const serverKeyBuffer = Buffer.from(xserverkey, 'hex'); const newUser = await UserKey.create({ ownerSimpleHash: emailHash, publicKey: sodium.to_base64(boxKeyPair.publicKey), signingPublicKey: sodium.to_base64(signKeyPair.publicKey), encryptedBoxSecretKey: cryptoService.encryptSecretKey(Buffer.from(boxKeyPair.privateKey), serverKeyBuffer), encryptedSignSecretKey: cryptoService.encryptSecretKey(Buffer.from(signKeyPair.privateKey), serverKeyBuffer), status: 'active', sessions: [] }); console.log(`Successfully created new UserKey for migrated user ${email}.`); return newUser; } } catch (fileError) { if (fileError.code !== 'ENOENT') { console.warn(`Could not read or parse legacy sessions file. Skipping legacy check. Error: ${fileError.message}`); } } return null; // Return null if validation fails for any reason } ================================================================================= FILE: src/api/controllers/shareddata.controller.js ================================================================================= const { Readable } = require('stream'); const sodium = require('libsodium-wrappers'); const contentDisposition = require('content-disposition'); const { handleApiError, sendCompressedJson, canonicalStringify } = require('../../utils/api.utils'); const cryptoService = require('../../services/crypto.service'); const signatureService = require('../../services/signature.service'); const quotaService = require('../../services/quota.service'); const zstd = require('@mongodb-js/zstd'); const zlib = require('zlib'); const mongoose = require('mongoose'); const GRIDFS_CHUNK_SIZE_BYTES = 512 * 1024; async function verifyAdminPrivilege(doc, userKeys, plaintextEmail, db, pepper) { const userEmailHash = userKeys.ownerSimpleHash; if (doc.isEncrypted) { const wrappedKeyBinary = doc.wrappedKeys[userEmailHash] || doc.wrappedKeys.get(userEmailHash); if (!wrappedKeyBinary) throw { statusCode: 403, message: 'You do not have permission to access this encrypted resource.' }; const wrappedKey = Buffer.isBuffer(wrappedKeyBinary) ? wrappedKeyBinary : Buffer.from(wrappedKeyBinary.buffer); const metadataCiphertext = Buffer.isBuffer(doc.encryptedMetadata) ? doc.encryptedMetadata : Buffer.from(doc.encryptedMetadata.buffer); const dataFileKey = sodium.crypto_box_seal_open(wrappedKey, userKeys.publicKey, userKeys.secretKey); if (!dataFileKey) throw { statusCode: 500, message: 'Internal error: Failed to decrypt key for privilege check.' 
}; const key = dataFileKey.slice(0, sodium.crypto_secretbox_KEYBYTES); const nonce = dataFileKey.slice(sodium.crypto_secretbox_KEYBYTES); const decryptedMetadataJson = sodium.crypto_secretbox_open_easy(metadataCiphertext, nonce, key); if (!decryptedMetadataJson) throw { statusCode: 500, message: 'Internal error: Failed to decrypt metadata for privilege check.' }; const metadata = JSON.parse(sodium.to_string(decryptedMetadataJson)); if (!metadata.adminEmails || !metadata.adminEmails.includes(plaintextEmail)) { throw { statusCode: 403, message: 'You do not have administrative privilege for this resource.' }; } return metadata; } else { if (!doc.adminEmailHashes || !doc.adminEmailHashes.includes(userEmailHash)) { throw { statusCode: 403, message: 'You do not have administrative privilege for this resource.' }; } return doc.metadata; } } async function processAndSendSharedData(doc, req, res) { const userEmailHash = req.userKeys ? req.userKeys.ownerSimpleHash : null; const { gfs } = req.db; const { pepper, siteOwnerEmail } = req; const siteOwnerEmailSimpleHash = siteOwnerEmail ? cryptoService.simpleHash(siteOwnerEmail, pepper) : null; let canAccess = false; if (doc.isEncrypted) { if (!req.userKeys) throw { statusCode: 401, message: 'Authentication is required to access this encrypted resource.' }; if (doc.authorizedEmailHashes && doc.authorizedEmailHashes.includes(userEmailHash)) canAccess = true; } else { switch (doc.accessControl) { case 'public': canAccess = true; break; case 'authenticated': if (!req.userKeys) throw { statusCode: 401, message: 'Authentication is required.' }; canAccess = true; break; case 'restricted': if (!userEmailHash) throw { statusCode: 401, message: 'Authentication is required.' }; canAccess = doc.authorizedEmailHashes.includes(userEmailHash); break; } } if (!canAccess) throw { statusCode: 403, message: 'You do not have permission to access this resource.' }; let payloadFromDb = Buffer.alloc(0); if (doc.payload) { const chunks = []; const downloadStream = gfs.openDownloadStream(doc.payload); for await (const chunk of downloadStream) chunks.push(chunk); payloadFromDb = Buffer.concat(chunks); } let finalMetadata, payloadForSending; if (doc.isEncrypted) { const wrappedKeyBinary = doc.wrappedKeys[userEmailHash] || doc.wrappedKeys.get(userEmailHash); if (!wrappedKeyBinary) throw { statusCode: 403, message: 'Your key is not present on this resource.' }; const wrappedKey = Buffer.from(wrappedKeyBinary.buffer || wrappedKeyBinary); const dataFileKey = sodium.crypto_box_seal_open(wrappedKey, req.userKeys.publicKey, req.userKeys.secretKey); if (!dataFileKey) throw { statusCode: 403, message: 'Failed to decrypt key.' }; const key = dataFileKey.slice(0, sodium.crypto_secretbox_KEYBYTES); const nonce = dataFileKey.slice(sodium.crypto_secretbox_KEYBYTES); if (payloadFromDb.length > 0) { const decryptedPayload = sodium.crypto_secretbox_open_easy(payloadFromDb, nonce, key); if (!decryptedPayload) throw { statusCode: 500, message: 'Failed to decrypt payload.' }; payloadForSending = Buffer.from(decryptedPayload); } else { payloadForSending = payloadFromDb; } const metadataCiphertext = Buffer.from(doc.encryptedMetadata.buffer || doc.encryptedMetadata); const decryptedMetadataJson = sodium.crypto_secretbox_open_easy(metadataCiphertext, nonce, key); if (!decryptedMetadataJson) throw { statusCode: 500, message: 'Failed to decrypt metadata.' 
}; finalMetadata = JSON.parse(sodium.to_string(decryptedMetadataJson)); } else { finalMetadata = doc.metadata; payloadForSending = payloadFromDb; } const keyDbContext = req.db; const validationResult = await signatureService.validateSignatures(finalMetadata, doc.isEncrypted, keyDbContext, pepper, payloadForSending); const mimeType = finalMetadata.payloadMetadata?.['mime-type'] || 'application/octet-stream'; res.setHeader('Content-Type', mimeType); if (finalMetadata.payloadMetadata?.filename) { res.setHeader('Content-Disposition', contentDisposition(finalMetadata.payloadMetadata.filename)); } let payloadToSend = payloadForSending; if (finalMetadata.isCompressed) { if (req.headers['accept-encoding']?.includes('zstd')) { res.setHeader('Content-Encoding', 'zstd'); } else { const decompressed = await zstd.decompress(payloadToSend); payloadToSend = zlib.gzipSync(decompressed); res.setHeader('Content-Encoding', 'gzip'); } } res.setHeader('Content-Length', payloadToSend.length.toString()); res.setHeader('X-Encrypted', String(doc.isEncrypted)); res.setHeader('X-Owner-Is-Site-Owner', String(doc.ownerSimpleHash === siteOwnerEmailSimpleHash)); res.setHeader('X-Compressed', finalMetadata.isCompressed ? 'true' : 'false'); res.setHeader('X-Validation-Status-Metadata', validationResult.valStatusMeta); if (validationResult.signerMeta) res.setHeader('X-Validation-Signer-Metadata', validationResult.signerMeta); res.setHeader('X-Validation-Status-Full', validationResult.valStatusFull); if (validationResult.signerFull) res.setHeader('X-Validation-Signer-Full', validationResult.signerFull); if (finalMetadata.signerEmail) res.setHeader('X-Signer-Email', finalMetadata.signerEmail); if (finalMetadata.adminEmails?.length > 0) res.setHeader('X-Admin-Emails', finalMetadata.adminEmails.join(',')); let userIsAdmin = false; if (req.userKeys) { if (doc.isEncrypted) { userIsAdmin = finalMetadata.adminEmails?.includes(req.plaintextEmail); } else { userIsAdmin = finalMetadata.adminEmailHashes?.includes(userEmailHash); } } res.setHeader('X-User-Is-Admin', String(userIsAdmin)); if(userIsAdmin && finalMetadata.adminEmails) res.setHeader('X-Admin-Emails', finalMetadata.adminEmails.join(',')); if(userIsAdmin && finalMetadata.adminEmailHashes) res.setHeader('X-Admin-Hashes', finalMetadata.adminEmailHashes.join(',')); if(userIsAdmin && finalMetadata.authorizedEmails) res.setHeader('X-Authorized-Emails', finalMetadata.authorizedEmails.join(',')); if(userIsAdmin && finalMetadata.authorizedEmailHashes) res.setHeader('X-Authorized-Hashes', finalMetadata.authorizedEmailHashes.join(',')); res.setHeader('X-Message-Metadata', JSON.stringify(finalMetadata.tags || {})); if (doc.expiresAt) res.setHeader('X-Expires-At', doc.expiresAt.toUTCString()); res.setHeader('X-Payload-Metadata', JSON.stringify(finalMetadata.payloadMetadata || {})); res.setHeader('X-ID', doc._id.toString()); res.status(200).send(payloadToSend); } exports.createSharedData = async (req, res) => { let session; try { const { models, gfs } = req.db; const { pepper } = req; const { SharedData, UserKey } = models; const ownerKeys = req.userKeys; const ownerPlaintextEmail = req.plaintextEmail; const domain = req.header('X-Shared-Domain'); const field = req.header('X-Shared-Field'); if (!domain || !field) throw { statusCode: 400, message: 'X-Shared-Domain and X-Shared-Field headers are required.' 
}; const domainHash = cryptoService.simpleHash(domain, pepper); const fieldHash = cryptoService.simpleHash(field, pepper); const existingDocForPermsCheck = await SharedData.findOne({ domainHash, fieldHash }); let existingDocMetadata; if (existingDocForPermsCheck) { existingDocMetadata = await verifyAdminPrivilege(existingDocForPermsCheck, ownerKeys, ownerPlaintextEmail, req.db, pepper); } const finalOwnerHash = existingDocForPermsCheck ? existingDocForPermsCheck.ownerSimpleHash : ownerKeys.ownerSimpleHash; const finalOwnerUserDoc = await UserKey.findOne({ ownerSimpleHash: finalOwnerHash, status: 'active' }); if (!finalOwnerUserDoc) { throw { statusCode: 404, message: 'The owner for this shared data record could not be found.' }; } if (req.db.allowDrop) { const oldUncompressedSize = existingDocMetadata?.payloadMetadata?.payloadSize || 0; const newUncompressedSize = req.body.length; quotaService.checkQuota(finalOwnerUserDoc, newUncompressedSize, oldUncompressedSize); } const adminEmailInput = (req.header('X-Admin-Emails') || '').toLowerCase().split(/[\s,]+/).filter(e => e).map(e => e.trim()); const authorizedEmailsInput = (req.header('X-Authorized-Emails') || '').toLowerCase().split(/[\s,]+/).filter(e => e).map(e => e.trim()); const baseAdminEmails = adminEmailInput.length > 0 ? adminEmailInput : (existingDocMetadata?.adminEmails || [ownerPlaintextEmail]); const finalAdminEmails = [...new Set(baseAdminEmails)]; if (finalAdminEmails.length === 0) { throw { statusCode: 400, message: 'A resource must have at least one administrator.' }; } const finalAuthorizedEmails = [...new Set([...finalAdminEmails, ...authorizedEmailsInput])]; let recipientKeyMap = new Map(); if (finalAuthorizedEmails.length > 0) { const finalAuthorizedEmailHashes = finalAuthorizedEmails.map(e => cryptoService.simpleHash(e, pepper)); const recipientDocs = await UserKey.find({ ownerSimpleHash: { $in: finalAuthorizedEmailHashes }, status: 'active' }); if (recipientDocs.length !== finalAuthorizedEmails.length) { const foundHashes = new Set(recipientDocs.map(d => d.ownerSimpleHash)); const missingEmails = finalAuthorizedEmails.filter(email => !foundHashes.has(cryptoService.simpleHash(email, pepper))); throw { statusCode: 404, message: `Could not find or verify all specified users: ${missingEmails.join(', ')}` }; } recipientKeyMap = new Map(recipientDocs.map(doc => [doc.ownerSimpleHash, doc.publicKey])); } session = await SharedData.db.client.startSession(); let result, isUpdateOperation = false; let chargeDetailsForUpdate = null; let refundDetailsForUpdate = null; await session.withTransaction(async () => { const existingDoc = await SharedData.findOne({ domainHash, fieldHash }).session(session); isUpdateOperation = !!existingDoc; const isEncrypted = req.header('X-Encrypt') !== 'false'; const accessControl = req.header('X-Access-Control') || (isEncrypted ? 
'restricted' : 'public');
const adminEmailHashesInput = (req.header('X-Admin-Email-Hashes') || '').split(/[\s,]+/).filter(e => e).map(e => e.trim());
const authorizedEmailHashesInput = (req.header('X-Authorized-Email-Hashes') || '').split(/[\s,]+/).filter(e => e).map(e => e.trim());
const finalAdminEmailHashes = [...new Set([...finalAdminEmails.map(e => cryptoService.simpleHash(e, pepper)), ...adminEmailHashesInput].filter(e => e))];
const finalAuthorizedEmailHashes = [...new Set([...finalAuthorizedEmails.map(e => cryptoService.simpleHash(e, pepper)), ...authorizedEmailHashesInput].filter(e => e))];
const expiresInSeconds = parseInt(req.header('X-Expires-In-Seconds'), 10);
let compress = req.header('X-Compress-Payload') === 'true';
const tags = JSON.parse(req.header('X-Tags') || '{}');
const clientPayloadMetadata = JSON.parse(req.header('X-Payload-Metadata') || '{}');
const tagHashes = Object.entries(tags).map(([key, value]) => ({ keyHash: cryptoService.simpleHash(key, pepper), valueHash: cryptoService.simpleHash(String(value), pepper), _id: false }));
const uncompressedPayload = req.body;
let payloadForStorage = uncompressedPayload;
let compressedSize = null;
if (compress && payloadForStorage.length > 0) { payloadForStorage = await zstd.compress(payloadForStorage); compressedSize = payloadForStorage.length; } else { compress = false; }
const finalPayloadMetadata = { ...clientPayloadMetadata, payloadSize: uncompressedPayload.length };
if (compressedSize !== null) { finalPayloadMetadata.compressedSize = compressedSize; }
const newOwnerHash = existingDoc ? existingDoc.ownerSimpleHash : ownerKeys.ownerSimpleHash;
let docToSave = { domainHash, fieldHash, tagHashes, isEncrypted, ownerSimpleHash: newOwnerHash };
// Apply the optional TTL from X-Expires-In-Seconds, following the same pattern as createUserData and createMail.
if (!isNaN(expiresInSeconds) && expiresInSeconds > 0) { docToSave.expiresAt = new Date(Date.now() + expiresInSeconds * 1000); }
if (isEncrypted) {
docToSave.accessControl = 'restricted'; docToSave.adminEmailHashes = finalAdminEmailHashes; docToSave.authorizedEmailHashes = finalAuthorizedEmailHashes;
const dataFileKey = sodium.randombytes_buf(sodium.crypto_secretbox_KEYBYTES + sodium.crypto_secretbox_NONCEBYTES);
const key = dataFileKey.slice(0, sodium.crypto_secretbox_KEYBYTES);
const nonce = dataFileKey.slice(sodium.crypto_secretbox_KEYBYTES);
const metadataToEncrypt = { adminEmails: finalAdminEmails, authorizedEmails: finalAuthorizedEmails, signerEmail: ownerPlaintextEmail, signingPublicKey: sodium.to_base64(ownerKeys.signingPublicKey), payloadMetadata: finalPayloadMetadata, isCompressed: compress, serverTimestamp: new Date().toISOString(), tags: tags, domain: domain, field: field };
const payloadMetaBuffer = Buffer.from(canonicalStringify(finalPayloadMetadata));
const fullBufferToSign = Buffer.concat([payloadForStorage, payloadMetaBuffer]);
metadataToEncrypt.signature_meta = sodium.to_base64(sodium.crypto_sign_detached(Buffer.from(sodium.crypto_generichash(32, payloadMetaBuffer)), ownerKeys.signingSecretKey));
metadataToEncrypt.signature_full = sodium.to_base64(sodium.crypto_sign_detached(Buffer.from(sodium.crypto_generichash(32, fullBufferToSign)), ownerKeys.signingSecretKey));
docToSave.encryptedMetadata = Buffer.from(sodium.crypto_secretbox_easy(JSON.stringify(metadataToEncrypt), nonce, key));
payloadForStorage = Buffer.from(sodium.crypto_secretbox_easy(payloadForStorage, nonce, key));
docToSave.wrappedKeys = new Map();
finalAuthorizedEmails.forEach(email => { const emailHash = cryptoService.simpleHash(email, pepper); const publicKey_b64 = recipientKeyMap.get(emailHash); if (publicKey_b64) { const wrapped = Buffer.from(sodium.crypto_box_seal(dataFileKey, sodium.from_base64(publicKey_b64)));
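// Envelope encryption: dataFileKey bundles the secretbox key and nonce used above
// for the payload and metadata; sealing it to each authorized user's public box
// key means only the holder of the matching secret key can unwrap it.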
docToSave.wrappedKeys.set(emailHash, wrapped); } }); } else { docToSave.accessControl = accessControl; docToSave.adminEmailHashes = finalAdminEmailHashes; if (docToSave.accessControl === 'restricted') docToSave.authorizedEmailHashes = finalAuthorizedEmailHashes; const metadata = { adminEmailHashes: docToSave.adminEmailHashes, signerHash: cryptoService.simpleHash(ownerPlaintextEmail, pepper), signingPublicKey: sodium.to_base64(ownerKeys.signingPublicKey), payloadMetadata: finalPayloadMetadata, isCompressed: compress, serverTimestamp: new Date().toISOString(), tags: tags, domain: domain, field: field }; const payloadMetaBuffer = Buffer.from(canonicalStringify(finalPayloadMetadata)); const fullBufferToSign = Buffer.concat([payloadForStorage, payloadMetaBuffer]); metadata.signature_meta = sodium.to_base64(sodium.crypto_sign_detached(Buffer.from(sodium.crypto_generichash(32, payloadMetaBuffer)), ownerKeys.signingSecretKey)); metadata.signature_full = sodium.to_base64(sodium.crypto_sign_detached(Buffer.from(sodium.crypto_generichash(32, fullBufferToSign)), ownerKeys.signingSecretKey)); docToSave.metadata = metadata; } if (existingDoc) { const oldCompressedSize = await quotaService.getPayloadSize(existingDoc.payload, gfs, session); if (oldCompressedSize > 0) { const storageField = req.db.allowDrop ? 'storageUsedTest' : 'storageUsedProd'; if (!req.db.allowDrop) { await UserKey.updateOne( { ownerSimpleHash: existingDoc.ownerSimpleHash, status: 'active' }, { $inc: { [storageField]: -oldCompressedSize } }, { session } ); } else { refundDetailsForUpdate = { owner: existingDoc.ownerSimpleHash, size: oldCompressedSize }; } } if (existingDoc.payload) { const fileExists = await gfs.find({ _id: existingDoc.payload }, { session }).limit(1).toArray(); if (fileExists.length > 0) { await gfs.delete(existingDoc.payload, { session }); } } await models.SharedData.deleteOne({ _id: existingDoc._id }, { session }); } if (payloadForStorage.length > 0) { const uploadStream = gfs.openUploadStream(finalPayloadMetadata.filename || 'payload', { session, chunkSizeBytes: GRIDFS_CHUNK_SIZE_BYTES }); Readable.from(payloadForStorage).pipe(uploadStream); await new Promise((resolve, reject) => { uploadStream.on('finish', resolve); uploadStream.on('error', reject); }); docToSave.payload = uploadStream.id; } result = await new models.SharedData(docToSave).save({ session }); const newCompressedSize = payloadForStorage.length; if (newCompressedSize > 0) { const storageField = req.db.allowDrop ? 'storageUsedTest' : 'storageUsedProd'; if (!req.db.allowDrop) { const otherStorageField = 'storageUsedTest'; const updateResult = await UserKey.updateOne( { ownerSimpleHash: newOwnerHash, status: 'active', $expr: { $or: [ { $eq: ['$quotaLimit', -1] }, { $lte: [ { $add: [`$${storageField}`, `$${otherStorageField}`, uncompressedPayload.length] }, '$quotaLimit' ] } ] } }, { $inc: { [storageField]: newCompressedSize } }, { session } ); if (updateResult.modifiedCount === 0) { throw { statusCode: 413, message: 'Payload exceeds owner\'s storage quota (atomic check failed).' 
}; } } else { chargeDetailsForUpdate = { owner: newOwnerHash, size: newCompressedSize }; } } }); if (req.db.allowDrop && refundDetailsForUpdate) { const storageField = 'storageUsedTest'; await UserKey.updateOne( { ownerSimpleHash: refundDetailsForUpdate.owner, status: 'active' }, { $inc: { [storageField]: -refundDetailsForUpdate.size } } ); } if (req.db.allowDrop && chargeDetailsForUpdate) { const storageField = 'storageUsedTest'; await UserKey.updateOne( { ownerSimpleHash: chargeDetailsForUpdate.owner, status: 'active' }, { $inc: { [storageField]: chargeDetailsForUpdate.size } } ); } res.status(isUpdateOperation ? 200 : 201).json({ success: true, operation: isUpdateOperation ? 'updated' : 'created', id: result._id, expiresAt: result.expiresAt || null }); } catch (e) { handleApiError(res, e); } finally { if (session) await session.endSession(); } }; exports.getSharedData = async (req, res) => { try { const { pepper } = req; const { models } = req.db; const { siteOwnerEmail } = req; const siteOwnerEmailSimpleHash = siteOwnerEmail ? cryptoService.simpleHash(siteOwnerEmail, pepper) : null; const queryHeader = req.header('X-Query-String'); const query = queryHeader ? Object.fromEntries(new URLSearchParams(queryHeader)) : req.query; const { domain, field } = query; if (!domain) throw { statusCode: 400, message: 'A "domain" query parameter is required.' }; const filter = { domainHash: cryptoService.simpleHash(domain, pepper) }; if (field) filter.fieldHash = cryptoService.simpleHash(field, pepper); const tagHashesFilter = []; Object.keys(query).forEach(key => { if (key.startsWith('tags.')) { const tagKey = key.substring(5); const tagValue = query[key]; if (typeof tagValue === 'string') { tagHashesFilter.push({ keyHash: cryptoService.simpleHash(tagKey, pepper), valueHash: cryptoService.simpleHash(tagValue, pepper) }); } } }); if (tagHashesFilter.length > 0) filter.tagHashes = { $all: tagHashesFilter }; const docs = await models.SharedData.find(filter).lean(); const results = []; const userEmailHash = req.userKeys ? 
req.userKeys.ownerSimpleHash : null;
for (const doc of docs) {
  if (doc.accessControl !== 'public' && !userEmailHash) continue;
  let metadata, canAccess = false;
  if (doc.isEncrypted) {
    if (req.userKeys && doc.authorizedEmailHashes && doc.authorizedEmailHashes.includes(userEmailHash)) {
      const wrappedKeyBinary = doc.wrappedKeys[userEmailHash];
      if (wrappedKeyBinary) {
        const wrappedKey = Buffer.from(wrappedKeyBinary.buffer || wrappedKeyBinary);
        const dataFileKey = sodium.crypto_box_seal_open(wrappedKey, req.userKeys.publicKey, req.userKeys.secretKey);
        if (dataFileKey) {
          const key = dataFileKey.slice(0, sodium.crypto_secretbox_KEYBYTES);
          const nonce = dataFileKey.slice(sodium.crypto_secretbox_KEYBYTES);
          const metadataCiphertext = Buffer.from(doc.encryptedMetadata.buffer || doc.encryptedMetadata);
          const decryptedMetadataJson = sodium.crypto_secretbox_open_easy(metadataCiphertext, nonce, key);
          if (decryptedMetadataJson) { metadata = JSON.parse(sodium.to_string(decryptedMetadataJson)); canAccess = true; }
        }
      }
    }
  } else {
    switch (doc.accessControl) {
      case 'public': canAccess = true; break;
      case 'authenticated': canAccess = !!req.userKeys; break;
      case 'restricted': canAccess = !!userEmailHash && doc.authorizedEmailHashes.includes(userEmailHash); break;
    }
    if (canAccess) metadata = doc.metadata;
  }
  if (canAccess) {
    const keyDbContext = req.db;
    const validationResult = await signatureService.validateSignatures(metadata, doc.isEncrypted, keyDbContext, pepper);
    let userIsAdmin = false;
    if (req.userKeys) { if (doc.isEncrypted) { userIsAdmin = metadata.adminEmails?.includes(req.plaintextEmail); } else { userIsAdmin = doc.adminEmailHashes?.includes(userEmailHash); } }
    const resultItem = { id: doc._id, isEncrypted: doc.isEncrypted, isCompressed: metadata.isCompressed, accessControl: doc.accessControl, ownerIsSiteOwner: doc.ownerSimpleHash === siteOwnerEmailSimpleHash, userIsAdmin: userIsAdmin, expiresAt: doc.expiresAt, validation: { status: validationResult.valStatusMeta, signer: validationResult.signerMeta }, signerEmail: metadata.signerEmail, signerHash: metadata.signerHash, tags: metadata.tags || {}, domain: metadata.domain, field: metadata.field, payloadMetadata: metadata.payloadMetadata };
    if (userIsAdmin && metadata.adminEmails) resultItem.adminEmails = metadata.adminEmails;
    if (userIsAdmin && metadata.adminEmailHashes) resultItem.adminEmailHashes = metadata.adminEmailHashes;
    if (userIsAdmin && metadata.authorizedEmails) resultItem.authorizedEmails = metadata.authorizedEmails;
    if (userIsAdmin && metadata.authorizedEmailHashes) resultItem.authorizedEmailHashes = metadata.authorizedEmailHashes;
    results.push(resultItem);
  }
}
await sendCompressedJson(req, res, results); } catch (e) { handleApiError(res, e); } };
exports.getSharedDataPayloadById = async (req, res) => { try {
  const doc = await req.db.models.SharedData.findById(req.params.id).lean();
  if (!doc) throw { statusCode: 404, message: 'Shared data not found.' };
  const etag = `"${doc._id.toString()}"`;
  res.setHeader('ETag', etag);
  // 'private' is a safe default cache policy for shared data.
  res.setHeader('Cache-Control', 'private, max-age=31536000, immutable');
  if (req.header('if-none-match') === etag) return res.status(304).end();
  await processAndSendSharedData(doc, req, res);
} catch(e) { handleApiError(res, e); } };
exports.getSharedDataPayloadByQuery = async (req, res) => { try { const { pepper } = req; const { models } = req.db; const queryHeader = req.header('X-Query-String'); const query = queryHeader ?
Object.fromEntries(new URLSearchParams(queryHeader)) : req.query; const { domain, field } = query; if (!domain || !field) { throw { statusCode: 400, message: 'Query parameters "domain" and "field" are required.' }; } const domainHash = cryptoService.simpleHash(domain, pepper); const fieldHash = cryptoService.simpleHash(field, pepper); const doc = await models.SharedData.findOne({ domainHash, fieldHash }).lean(); if (!doc) { throw { statusCode: 404, message: 'Shared data not found for the specified domain and field.' }; } await processAndSendSharedData(doc, req, res); } catch(e) { handleApiError(res, e); } }; exports.deleteSharedDataByQuery = async (req, res) => { let session; try { const { models, gfs } = req.db; const { pepper } = req; session = await models.SharedData.db.client.startSession(); let adminDocIdsToDelete = []; let deletedCount = 0; const refundsForUpdate = new Map(); await session.withTransaction(async () => { const { UserKey } = models; const queryHeader = req.header('X-Query-String'); const query = queryHeader ? Object.fromEntries(new URLSearchParams(queryHeader)) : req.query; const { domain, field } = query; if (!domain) throw { statusCode: 400, message: 'A "domain" query parameter is required for deletion.' }; const baseFilter = { domainHash: cryptoService.simpleHash(domain, pepper) }; if(field) baseFilter.fieldHash = cryptoService.simpleHash(field, pepper); const allMatchingDocs = await models.SharedData.find(baseFilter).session(session); const docsUserCanAdmin = []; for (const doc of allMatchingDocs) { try { await verifyAdminPrivilege(doc, req.userKeys, req.plaintextEmail, req.db, pepper); docsUserCanAdmin.push(doc); } catch(err) { /* Ignore docs user doesn't administer */ } } if (docsUserCanAdmin.length === 0) { throw { statusCode: 403, message: 'No matching data found that you have permission to delete.'}; } const payloadsByOwner = new Map(); const fileIdsToDelete = []; adminDocIdsToDelete = docsUserCanAdmin.map(doc => doc._id); for (const doc of docsUserCanAdmin) { if (doc.payload) { fileIdsToDelete.push(doc.payload); const payloads = payloadsByOwner.get(doc.ownerSimpleHash) || []; payloads.push(doc.payload); payloadsByOwner.set(doc.ownerSimpleHash, payloads); } } for (const [ownerHash, payloadIds] of payloadsByOwner.entries()) { // Pass the session to make the find operation transactional. const files = await gfs.find({ _id: { $in: payloadIds } }, { session }).project({ length: 1 }).toArray(); const sizeToDecrement = files.reduce((sum, file) => sum + file.length, 0); if (sizeToDecrement > 0) { const storageField = req.db.allowDrop ? 'storageUsedTest' : 'storageUsedProd'; if (!req.db.allowDrop) { await UserKey.updateOne( { ownerSimpleHash: ownerHash, status: 'active' }, { $inc: { [storageField]: -sizeToDecrement } }, { session } ); } else { refundsForUpdate.set(ownerHash, (refundsForUpdate.get(ownerHash) || 0) + sizeToDecrement); } } } if (fileIdsToDelete.length > 0) { for (const id of fileIdsToDelete) { const fileExists = await gfs.find({ _id: id }, { session }).limit(1).toArray(); if (fileExists.length > 0) { await gfs.delete(id, { session }); } } } const deleteResult = await models.SharedData.deleteMany({ _id: { $in: adminDocIdsToDelete } }, { session }); deletedCount = deleteResult.deletedCount; if (deletedCount === 0) { throw { statusCode: 404, message: 'No matching data found (possibly deleted by a concurrent request).' 
}; } }); if (req.db.allowDrop && refundsForUpdate.size > 0) { const storageField = 'storageUsedTest'; const { UserKey } = models; for (const [ownerHash, size] of refundsForUpdate.entries()) { await UserKey.updateOne( { ownerSimpleHash: ownerHash, status: 'active' }, { $inc: { [storageField]: -size } } ); } } res.status(200).json({ success: true, deletedCount: deletedCount, deletedIds: adminDocIdsToDelete.map(id => id.toString()) }); } catch (e) { handleApiError(res, e); } finally { if (session) await session.endSession(); } }; exports.deleteSharedDataById = async (req, res) => { let session; try { const { models, gfs } = req.db; const { pepper } = req; const { SharedData, UserKey } = models; session = await SharedData.db.client.startSession(); let wasDeleted = false; let refundDetailsForUpdate = null; await session.withTransaction(async () => { const { id } = req.params; const doc = await models.SharedData.findById(id).session(session); if (!doc) throw { statusCode: 404, message: 'No matching data found.' }; await verifyAdminPrivilege(doc, req.userKeys, req.plaintextEmail, req.db, pepper); const payloadSize = await quotaService.getPayloadSize(doc.payload, gfs, session); if (doc.payload) { const fileExists = await gfs.find({ _id: doc.payload }, { session }).limit(1).toArray(); if (fileExists.length > 0) { await gfs.delete(doc.payload, { session }); } } const deleteResult = await models.SharedData.deleteOne({ _id: doc._id }, { session }); if (deleteResult.deletedCount > 0 && payloadSize > 0) { const storageField = req.db.allowDrop ? 'storageUsedTest' : 'storageUsedProd'; if (!req.db.allowDrop) { await UserKey.updateOne( { ownerSimpleHash: doc.ownerSimpleHash, status: 'active' }, { $inc: { [storageField]: -payloadSize } }, { session } ); } else { refundDetailsForUpdate = { owner: doc.ownerSimpleHash, size: payloadSize }; } } if (deleteResult.deletedCount > 0) wasDeleted = true; if (deleteResult.deletedCount === 0) throw { statusCode: 404, message: 'No matching data found.'}; }); if (req.db.allowDrop && wasDeleted && refundDetailsForUpdate) { const storageField = 'storageUsedTest'; await UserKey.updateOne( { ownerSimpleHash: refundDetailsForUpdate.owner, status: 'active' }, { $inc: { [storageField]: -refundDetailsForUpdate.size } } ); } res.status(200).json({ success: true, message: 'Shared data deleted.' }); } catch (e) { handleApiError(res, e); } finally { if (session) await session.endSession(); } }; ================================================================================= FILE: src/api/controllers/storage.controller.js ================================================================================= const { handleApiError } = require('../../utils/api.utils'); /** * Reports the user's current storage usage and their quota limit. */ exports.getQuota = async (req, res) => { try { // The `authenticate` middleware has already populated req.user. const { user } = req; const used = (user.storageUsedProd || 0) + (user.storageUsedTest || 0); res.status(200).json({ used: used, limit: user.quotaLimit, }); } catch (e) { handleApiError(res, e); } }; ================================================================================= FILE: src/api/controllers/misc.controller.js ================================================================================= const crypto = require('crypto'); const { handleApiError } = require('../../utils/api.utils'); const cryptoService = require('../../services/crypto.service'); /** * Looks up the public keys for a given email address. 
* The user must be authenticated to use this endpoint. */ exports.lookupKeys = async (req, res) => { try { const { email } = req.body; if (!email) { return res.status(400).json({ error: 'Email is required.' }); } const { UserKey } = req.db.models; const { pepper } = req; // Find the active key for the requested user. const userKeyDoc = await UserKey.findOne({ ownerSimpleHash: cryptoService.simpleHash(email.toLowerCase(), pepper), status: 'active' }).lean(); // .lean() for performance, as we only need to read data. if (!userKeyDoc) { return res.status(404).json({ error: 'User not found or has no active key.' }); } // Return only the public keys. res.status(200).json({ publicKey: userKeyDoc.publicKey, signingPublicKey: userKeyDoc.signingPublicKey }); } catch (e) { handleApiError(res, e); } }; /** * Generates temporary TURN server credentials for a WebRTC connection. * The user must be authenticated. The credentials are valid for 12 hours. */ exports.getTurnCredentials = async (req, res) => { try { // The `authenticate` middleware has already validated the user. // We can now safely generate and return temporary TURN credentials. // Credentials will be valid for 12 hours (as in the original code). const expiry = Math.floor(Date.now() / 1000) + (12 * 3600); // The temporary username IS the expiry timestamp. const username = expiry.toString(); // The temporary password is an HMAC-SHA1 hash, as required by the TURN standard. // We get the TURN_SECRET from the request object, where it's injected by app.js. const hmac = crypto.createHmac('sha1', req.turnSecret); hmac.update(username); const credential = hmac.digest('base64'); // Send the credentials and server URLs back to the client. res.status(200).json({ username: username, credential: credential, urls: [ 'stun:stun.l.google.com:19302', 'stun:stun1.l.google.com:19302', 'turn:haskellai.com:3478' ] }); } catch (e) { handleApiError(res, e); } }; ================================================================================= FILE: src/api/controllers/userdata.controller.js ================================================================================= const mongoose = require('mongoose'); const { Readable } = require('stream'); const sodium = require('libsodium-wrappers'); const zstd = require('@mongodb-js/zstd'); const zlib = require('zlib'); const contentDisposition = require('content-disposition'); const { handleApiError, sendCompressedJson } = require('../../utils/api.utils.js'); const cryptoService = require('../../services/crypto.service'); const quotaService = require('../../services/quota.service'); const GRIDFS_CHUNK_SIZE_BYTES = 512 * 1024; function buildUserDataQuery(emailHash, signingSecretKey, queryParams) { const filter = { ownerSimpleHash: emailHash }; const hmacKey = sodium.crypto_generichash(sodium.crypto_auth_KEYBYTES, signingSecretKey); const encryptedTagHashes = []; const plaintextTagFilters = {}; let hasTags = false; Object.keys(queryParams).forEach(key => { if (key.startsWith('tags.')) { hasTags = true; const tagKey = key.substring(5); if (tagKey.startsWith('$')) { console.warn(`SECURITY: Rejected query with potential operator in key: ${tagKey}`); return; } const tagValue = queryParams[key]; if (typeof tagValue === 'string' || typeof tagValue === 'number') { plaintextTagFilters[`tags.${tagKey}`] = tagValue; encryptedTagHashes.push({ keyHash: cryptoService.keyedHash(tagKey, hmacKey), valueHash: cryptoService.keyedHash(String(tagValue), hmacKey) }); } else { console.warn(`Attempted non-scalar query on tag '${tagKey}'. 
Ignoring.`); } } }); if (hasTags) { filter.$or = [ { isEncrypted: false, ...plaintextTagFilters }, { isEncrypted: true, tagHashes: { $all: encryptedTagHashes } } ]; } return filter; } async function processAndSendUserData(doc, req, res) { const { ownerSimpleHash, publicKey, secretKey } = req.userKeys; const { gfs } = req.db; if (doc.ownerSimpleHash !== ownerSimpleHash) { throw { statusCode: 404, message: 'Data not found.' }; } let payloadFromDb = Buffer.alloc(0); if (doc.payload) { const chunks = []; const downloadStream = gfs.openDownloadStream(doc.payload); for await (const chunk of downloadStream) { chunks.push(chunk); } payloadFromDb = Buffer.concat(chunks); } let finalPayload; let finalMetadata; if (doc.isEncrypted) { const metadataCiphertext = Buffer.from(doc.encryptedMetadata.buffer); const decryptedMetadataJson = sodium.crypto_box_seal_open(metadataCiphertext, publicKey, secretKey); if (!decryptedMetadataJson) throw { statusCode: 403, message: 'Failed to decrypt metadata.' }; finalMetadata = JSON.parse(sodium.to_string(decryptedMetadataJson)); if (payloadFromDb.length > 0) { const decryptedPayload = sodium.crypto_box_seal_open(payloadFromDb, publicKey, secretKey); if (!decryptedPayload) throw { statusCode: 403, message: 'Failed to decrypt payload.' }; finalPayload = Buffer.from(decryptedPayload); } else { finalPayload = payloadFromDb; } } else { finalMetadata = doc.metadata; finalPayload = payloadFromDb; } const mimeType = finalMetadata.payloadMetadata?.['mime-type'] || 'application/octet-stream'; res.setHeader('Content-Type', mimeType); if (finalMetadata.payloadMetadata?.filename) { res.setHeader('Content-Disposition', contentDisposition(finalMetadata.payloadMetadata.filename)); } let payloadToSend = finalPayload; if (finalMetadata.isCompressed) { if (req.headers['accept-encoding']?.includes('zstd')) { res.setHeader('Content-Encoding', 'zstd'); } else { const decompressed = await zstd.decompress(payloadToSend); payloadToSend = zlib.gzipSync(decompressed); res.setHeader('Content-Encoding', 'gzip'); } } res.setHeader('Content-Length', payloadToSend.length.toString()); res.setHeader('X-Encrypted', String(doc.isEncrypted)); res.setHeader('X-Compressed', finalMetadata.isCompressed ? 'true' : 'false'); if (doc.expiresAt) res.setHeader('X-Expires-At', doc.expiresAt.toUTCString()); const encodeHeaderValue = (value) => { if (/[^\u0000-\u00ff]/.test(value)) { const utf8Bytes = Buffer.from(value, 'utf-8'); return `B64:${utf8Bytes.toString('base64')}`; } return value; }; res.setHeader('X-Message-Metadata', encodeHeaderValue(JSON.stringify(finalMetadata.tags || {}))); res.setHeader('X-Payload-Metadata', encodeHeaderValue(JSON.stringify(finalMetadata.payloadMetadata || {}))); res.setHeader('X-ID', doc._id.toString()); res.status(200).send(payloadToSend); } exports.createUserData = async (req, res) => { let session; try { const { models, gfs } = req.db; const { UserData, UserKey } = models; const { ownerSimpleHash } = req.userKeys; // --- Pre-flight check logic (remains the same) --- // This part is okay because it's just a preliminary check to fail fast. 
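// A hedged client-side sketch of the request this handler expects; the endpoint
// path '/api/userdata' is illustrative (actual routing lives in src/api/routes),
// and payloadBytes is a placeholder:
//
//   await fetch('/api/userdata', {
//     method: 'POST',
//     headers: {
//       'X-Metadata': JSON.stringify({ app: 'notes', key: 'draft-1' }),
//       'X-Encrypt-Payload': 'true',     // default; 'false' stores plaintext tags/metadata
//       'X-Compress-Payload': 'true',    // optional zstd compression
//       'X-Payload-Metadata': JSON.stringify({ filename: 'draft.txt', 'mime-type': 'text/plain' }),
//       'X-Expires-In-Seconds': '86400'  // optional TTL
//     },
//     body: payloadBytes
//   });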
const { signingSecretKey } = req.userKeys; const tags = JSON.parse(req.header('X-Metadata') || '{}'); const encryptData = req.header('X-Encrypt-Payload') !== 'false'; if (req.db.allowDrop) { let preFilter; if (encryptData) { const hmacKey = sodium.crypto_generichash(sodium.crypto_auth_KEYBYTES, signingSecretKey); const tagHashes = Object.entries(tags).map(([key, value]) => ({ keyHash: cryptoService.keyedHash(key, hmacKey), valueHash: cryptoService.keyedHash(String(value), hmacKey) })); preFilter = { ownerSimpleHash, tagHashes: { $all: tagHashes, $size: tagHashes.length } }; } else { preFilter = { ownerSimpleHash, isEncrypted: false }; const tagKeys = Object.keys(tags); if (tagKeys.length === 0) { // If there are no tags, we can't find a unique document to replace. // The logic inside the transaction will handle creating a new doc correctly. // So we don't need to find an old doc here. } else { tagKeys.forEach(key => { preFilter[`tags.${key}`] = tags[key]; }); // This query now finds documents that contain the specified tags, regardless of key order. // To ensure an exact match (like the original code did), we also check that the number of keys matches. // This uses the $where operator, which can be less performant but is necessary for this logic. // NOTE: A more performant solution would be to add a `tagCount` field to the schema. preFilter['$where'] = `Object.keys(this.tags || {}).length === ${tagKeys.length}`; } } // This logic can result in a harmless "query is empty" if no tags are provided, which is fine. const oldDocForQuota = Object.keys(tags).length > 0 ? await UserData.findOne(preFilter).lean() : null; const oldUncompressedSize = oldDocForQuota?.metadata?.payloadMetadata?.payloadSize || 0; quotaService.checkQuota(req.user, req.body.length, oldUncompressedSize); } // ================================================================= // --- REFACTORED LOGIC STARTS HERE --- // ================================================================= session = await UserData.db.client.startSession(); let isUpdateOperation = false; let newDoc; let refundDetailsForUpdate = null; let chargeDetailsForUpdate = null; await session.withTransaction(async () => { const { publicKey } = req.userKeys; let compress = req.header('X-Compress-Payload') === 'true'; const clientPayloadMetadata = JSON.parse(req.header('X-Payload-Metadata') || '{}'); const expiresInSeconds = parseInt(req.header('X-Expires-In-Seconds'), 10); if (Object.keys(tags).length === 0) throw { statusCode: 400, message: 'X-Metadata with at least one tag is required.' 
}; const uncompressedPayload = req.body; clientPayloadMetadata.payloadSize = uncompressedPayload.length; let payloadForStorage = uncompressedPayload; if (compress && payloadForStorage.length > 0){ payloadForStorage = await zstd.compress(payloadForStorage); clientPayloadMetadata.compressedSize = payloadForStorage.length; } else { compress = false; } const finalMetadata = { payloadMetadata: clientPayloadMetadata, isCompressed: compress, serverTimestamp: new Date().toISOString(), tags: tags }; let filter; const docToSave = { isEncrypted: encryptData, ownerSimpleHash }; if (!isNaN(expiresInSeconds) && expiresInSeconds > 0) { docToSave.expiresAt = new Date(Date.now() + expiresInSeconds * 1000); } if (encryptData) { const hmacKey = sodium.crypto_generichash(sodium.crypto_auth_KEYBYTES, signingSecretKey); const tagHashes = Object.entries(tags).map(([key, value]) => ({ keyHash: cryptoService.keyedHash(key, hmacKey), valueHash: cryptoService.keyedHash(String(value), hmacKey) })); filter = { ownerSimpleHash, tagHashes: { $all: tagHashes, $size: tagHashes.length } }; docToSave.tagHashes = tagHashes; docToSave.encryptedMetadata = Buffer.from(sodium.crypto_box_seal(JSON.stringify(finalMetadata), publicKey)); if (payloadForStorage.length > 0) { payloadForStorage = Buffer.from(sodium.crypto_box_seal(payloadForStorage, publicKey)); } } else { filter = { ownerSimpleHash, isEncrypted: false }; const tagKeys = Object.keys(tags); tagKeys.forEach(key => { filter[`tags.${key}`] = tags[key]; }); filter['$where'] = `Object.keys(this.tags || {}).length === ${tagKeys.length}`; docToSave.tags = tags; docToSave.metadata = finalMetadata; } // --- Step 1: Find and process the old document (if it exists) --- const oldDoc = await UserData.findOne(filter).session(session); isUpdateOperation = !!oldDoc; if (oldDoc) { const oldCompressedSize = await quotaService.getPayloadSize(oldDoc.payload, gfs, session); if (oldCompressedSize > 0) { const storageField = req.db.allowDrop ? 'storageUsedTest' : 'storageUsedProd'; if (!req.db.allowDrop) { // Refund immediately on production DB await UserKey.updateOne( { ownerSimpleHash, status: 'active' }, { $inc: { [storageField]: -oldCompressedSize } }, { session } ); } else { // Defer refund for test DB refundDetailsForUpdate = { size: oldCompressedSize }; } } // Delete old payload and document if (oldDoc.payload) { const fileExists = await gfs.find({ _id: oldDoc.payload }, { session }).limit(1).toArray(); if (fileExists.length > 0) { await gfs.delete(oldDoc.payload, { session }); } } await UserData.deleteOne({ _id: oldDoc._id }, { session }); } // --- Step 2: Create the new document and payload --- if (payloadForStorage.length > 0) { const uploadStream = gfs.openUploadStream(clientPayloadMetadata.filename || 'payload', { session, chunkSizeBytes: GRIDFS_CHUNK_SIZE_BYTES }); Readable.from(payloadForStorage).pipe(uploadStream); await new Promise((resolve, reject) => { uploadStream.on('finish', resolve); uploadStream.on('error', reject); }); docToSave.payload = uploadStream.id; } newDoc = await new UserData(docToSave).save({ session }); // --- Step 3: Charge for the new payload --- const newCompressedSize = payloadForStorage.length; if (newCompressedSize > 0) { const storageField = req.db.allowDrop ? 
'storageUsedTest' : 'storageUsedProd'; if (!req.db.allowDrop) { // Perform atomic charge on production DB const otherStorageField = 'storageUsedTest'; const updateResult = await UserKey.updateOne( { ownerSimpleHash: ownerSimpleHash, status: 'active', $expr: { $or: [ { $eq: ['$quotaLimit', -1] }, { $lte: [ { $add: [`$${storageField}`, `$${otherStorageField}`, uncompressedPayload.length] }, '$quotaLimit' ] } ] } }, { $inc: { [storageField]: newCompressedSize } }, { session } ); if (updateResult.modifiedCount === 0) { throw { statusCode: 413, message: 'Payload exceeds storage quota (atomic check failed).' }; } } else { // Defer charge for test DB chargeDetailsForUpdate = { size: newCompressedSize }; } } }); // --- Step 4: Apply deferred updates for the test database --- if (req.db.allowDrop) { const storageField = 'storageUsedTest'; if (refundDetailsForUpdate) { await UserKey.updateOne( { ownerSimpleHash, status: 'active' }, { $inc: { [storageField]: -refundDetailsForUpdate.size } } ); } if (chargeDetailsForUpdate) { await UserKey.updateOne( { ownerSimpleHash, status: 'active' }, { $inc: { [storageField]: chargeDetailsForUpdate.size } } ); } } res.status(isUpdateOperation ? 200 : 201).json({ success: true, operation: isUpdateOperation ? 'updated' : 'created', id: newDoc._id, expiresAt: newDoc.expiresAt || null }); // ================================================================= // --- REFACTORED LOGIC ENDS HERE --- // ================================================================= } catch (e) { handleApiError(res, e); } finally { if (session) await session.endSession(); } }; exports.getUserData = async (req, res) => { try { const queryHeader = req.header('X-Query-String'); const query = queryHeader ? Object.fromEntries(new URLSearchParams(queryHeader)) : req.query; const { publicKey, secretKey, signingSecretKey, ownerSimpleHash } = req.userKeys; const filter = buildUserDataQuery(ownerSimpleHash, signingSecretKey, query); const docs = await req.db.models.UserData.find(filter).lean(); const results = []; for (const doc of docs) { let metadata; if (doc.isEncrypted) { const metadataCiphertext = Buffer.from(doc.encryptedMetadata.buffer); const decryptedSecretJson = sodium.crypto_box_seal_open(metadataCiphertext, publicKey, secretKey); if (decryptedSecretJson) { metadata = JSON.parse(sodium.to_string(decryptedSecretJson)); } } else { metadata = doc.metadata; } if (metadata) { results.push({ id: doc._id, isEncrypted: doc.isEncrypted, expiresAt: doc.expiresAt, ...metadata }); } } await sendCompressedJson(req, res, results); } catch (e) { handleApiError(res, e); } }; exports.getUserDataById = async (req, res) => { try { const doc = await req.db.models.UserData.findById(req.params.id).lean(); if (!doc) throw { statusCode: 404, message: 'Data not found.' }; const etag = `"${doc._id.toString()}"`; res.setHeader('ETag', etag); res.setHeader('Cache-Control', 'private, max-age=31536000, immutable'); if (req.header('if-none-match') === etag) { return res.status(304).end(); } await processAndSendUserData(doc, req, res); } catch (e) { handleApiError(res, e); } }; exports.getUserDataByQuery = async (req, res) => { try { const queryHeader = req.header('X-Query-String'); const query = queryHeader ? Object.fromEntries(new URLSearchParams(queryHeader)) : req.query; const hasTags = Object.keys(query).some(k => k.startsWith('tags.')); if (!hasTags) { throw { statusCode: 400, message: 'At least one tags. query parameter is required.' 
}; } const filter = buildUserDataQuery(req.userKeys.ownerSimpleHash, req.userKeys.signingSecretKey, query); const docs = await req.db.models.UserData.find(filter).lean(); if (docs.length === 0) { throw { statusCode: 404, message: 'No userdata found matching the specified tags.' }; } if (docs.length > 1) { throw { statusCode: 409, message: 'Conflict: Multiple records match the specified tags. Please use the ID-specific endpoint.' }; } await processAndSendUserData(docs[0], req, res); } catch (e) { handleApiError(res, e); } }; exports.deleteUserDataByQuery = async (req, res) => { let session; try { const { models, gfs } = req.db; const { UserData, UserKey } = models; session = await UserData.db.client.startSession(); let deletedCount = 0; let deletedIds = []; let totalPayloadSizeToRefund = 0; await session.withTransaction(async () => { const queryHeader = req.header('X-Query-String'); const query = queryHeader ? Object.fromEntries(new URLSearchParams(queryHeader)) : req.query; const { signingSecretKey, ownerSimpleHash } = req.userKeys; const hasTags = Object.keys(query).some(k => k.startsWith('tags.')); if (!hasTags) throw { statusCode: 400, message: 'At least one tags. filter is required for deletion.' }; const filter = buildUserDataQuery(ownerSimpleHash, signingSecretKey, query); const docsToDelete = await UserData.find(filter).select({ _id: 1, payload: 1 }).session(session).lean(); if (docsToDelete.length > 0) { deletedIds = docsToDelete.map(doc => doc._id); const fileIdsToDelete = docsToDelete.map(doc => doc.payload).filter(id => id); let totalPayloadSize = 0; if (fileIdsToDelete.length > 0) { // --- FIX: Pass the session to make the find operation transactional --- const files = await gfs.find({ _id: { $in: fileIdsToDelete } }, { session }).project({ length: 1 }).toArray(); totalPayloadSize = files.reduce((sum, file) => sum + file.length, 0); for (const id of fileIdsToDelete) { const fileExists = await gfs.find({ _id: id }, { session }).limit(1).toArray(); if (fileExists.length > 0) { await gfs.delete(id, { session }); } } } const result = await UserData.deleteMany({ _id: { $in: deletedIds } }, { session }); deletedCount = result.deletedCount; if (deletedCount > 0 && totalPayloadSize > 0) { if (!req.db.allowDrop) { const storageField = 'storageUsedProd'; await UserKey.updateOne( { ownerSimpleHash: ownerSimpleHash, status: 'active' }, { $inc: { [storageField]: -totalPayloadSize } }, { session } ); } else { totalPayloadSizeToRefund = totalPayloadSize; } } if (deletedCount === 0) { throw { statusCode: 404, message: 'No matching data found (possibly deleted by a concurrent request).' 
}; } } }); if (req.db.allowDrop && deletedCount > 0 && totalPayloadSizeToRefund > 0) { const storageField = 'storageUsedTest'; await UserKey.updateOne( { ownerSimpleHash: req.userKeys.ownerSimpleHash, status: 'active' }, { $inc: { [storageField]: -totalPayloadSizeToRefund } } ); } res.status(200).json({ success: true, deletedCount: deletedCount, deletedIds: deletedIds }); } catch (e) { handleApiError(res, e); } finally { if (session) await session.endSession(); } }; exports.deleteUserDataById = async (req, res) => { let session; try { const { models, gfs } = req.db; const { UserData, UserKey } = models; session = await UserData.db.client.startSession(); let deletedIds = []; let deletedCount = 0; let payloadSizeToRefund = 0; let wasDeleted = false; await session.withTransaction(async () => { const { id } = req.params; const { ownerSimpleHash } = req.userKeys; if (!mongoose.Types.ObjectId.isValid(id)) { throw { statusCode: 400, message: 'Invalid ID format.' }; } const doc = await UserData.findById(id).session(session).lean(); if (!doc) throw { statusCode: 404, message: 'No matching data found.' }; if (doc.ownerSimpleHash !== ownerSimpleHash) throw { statusCode: 404, message: 'No matching data found.' }; // --- FIX: Pass the session to make the size lookup transactional --- const payloadSize = await quotaService.getPayloadSize(doc.payload, gfs, session); if (doc.payload) { const fileExists = await gfs.find({ _id: doc.payload }, { session }).limit(1).toArray(); if (fileExists.length > 0) { await gfs.delete(doc.payload, { session }); } } const result = await UserData.deleteOne({ _id: id }, { session }); if (result.deletedCount > 0 && payloadSize > 0) { if (!req.db.allowDrop) { const storageField = 'storageUsedProd'; await UserKey.updateOne( { ownerSimpleHash: ownerSimpleHash, status: 'active' }, { $inc: { [storageField]: -payloadSize } }, { session } ); } else { payloadSizeToRefund = payloadSize; } } if (result.deletedCount === 0) { throw { statusCode: 404, message: 'No matching data found (possibly deleted by a concurrent request).' 
}; } deletedCount = result.deletedCount; deletedIds = [id]; wasDeleted = true; }); if (req.db.allowDrop && wasDeleted && payloadSizeToRefund > 0) { const storageField = 'storageUsedTest'; await UserKey.updateOne( { ownerSimpleHash: req.userKeys.ownerSimpleHash, status: 'active' }, { $inc: { [storageField]: -payloadSizeToRefund } } ); } res.status(200).json({ success: true, message: `Successfully deleted document ${req.params.id}.`, deletedCount: deletedCount, deletedIds: deletedIds }); } catch (e) { handleApiError(res, e); } finally { if (session) await session.endSession(); } }; ================================================================================= FILE: src/api/controllers/mail.controller.js ================================================================================= const { Readable } = require('stream'); const sodium = require('libsodium-wrappers'); const contentDisposition = require('content-disposition'); const { handleApiError, sendCompressedJson, canonicalStringify } = require('../../utils/api.utils'); const cryptoService = require('../../services/crypto.service'); const signatureService = require('../../services/signature.service'); const quotaService = require('../../services/quota.service'); const zstd = require('@mongodb-js/zstd'); const zlib = require('zlib'); const GRIDFS_CHUNK_SIZE_BYTES = 512 * 1024; exports.createMail = async (req, res) => { let session; try { if(req.db.allowDrop) { quotaService.checkQuota(req.user, req.body.length, 0); } const { models, gfs } = req.db; const { pepper } = req; const { Mail, UserKey } = models; const senderKeys = req.userKeys; const senderPlaintextEmail = req.plaintextEmail; const recipientEmails = [...new Set((req.header('X-Recipient-Emails') || '').trim().toLowerCase().split(/\s*,\s*|\s+/).filter(e => e))]; if (recipientEmails.length === 0) throw { statusCode: 400, message: 'X-Recipient-Emails header with at least one email is required.' 
}; const allEmailsForWrapping = [...new Set([...recipientEmails, senderPlaintextEmail])]; const emailHashMap = new Map(allEmailsForWrapping.map(e => [cryptoService.simpleHash(e, pepper), e])); const allKeyDocs = await UserKey.find({ ownerSimpleHash: { $in: [...emailHashMap.keys()] }, status: 'active' }).lean(); const foundRecipientHashes = new Set(allKeyDocs.map(d => d.ownerSimpleHash)); const missingRecipients = recipientEmails.filter(email => !foundRecipientHashes.has(cryptoService.simpleHash(email, pepper))); if (missingRecipients.length > 0) { throw { statusCode: 404, message: `One or more recipients could not be found or are not active: ${missingRecipients.join(', ')}` }; } session = await Mail.db.client.startSession(); let newMail; let chargeDetailsForUpdate = null; await session.withTransaction(async () => { const expiresInSeconds = parseInt(req.header('X-Expires-In-Seconds'), 10); const isEncrypted = req.header('X-Encrypt') !== 'false'; let compress = req.header('X-Compress-Payload') === 'true'; const tags = JSON.parse(req.header('X-Tags') || '{}'); const clientPayloadMetadata = JSON.parse(req.header('X-Payload-Metadata') || '{}'); const recipientEmailHashes = recipientEmails.map(e => cryptoService.simpleHash(e, pepper)); const tagHashes = Object.entries(tags).map(([key, value]) => ({ keyHash: cryptoService.simpleHash(key, pepper), valueHash: cryptoService.simpleHash(String(value), pepper), _id: false })); const uncompressedPayload = req.body; let payloadForStorage = uncompressedPayload; let compressedSize = null; if (compress && payloadForStorage.length > 0) { payloadForStorage = await zstd.compress(payloadForStorage); compressedSize = payloadForStorage.length; } else { compress = false; } const finalPayloadMetadata = { ...clientPayloadMetadata, payloadSize: uncompressedPayload.length, }; if (compressedSize !== null) { finalPayloadMetadata.compressedSize = compressedSize; } let newMailData = { ownerSimpleHash: senderKeys.ownerSimpleHash, recipientEmailHashes, tagHashes, isEncrypted }; const metadataToSign = { signerEmail: senderPlaintextEmail, signerHash: senderKeys.ownerSimpleHash, signingPublicKey: sodium.to_base64(senderKeys.signingPublicKey), payloadMetadata: finalPayloadMetadata, isCompressed: compress, serverTimestamp: new Date().toISOString(), tags: tags }; if (isEncrypted) { const dataFileKey = sodium.randombytes_buf(sodium.crypto_secretbox_KEYBYTES + sodium.crypto_secretbox_NONCEBYTES); const key = dataFileKey.slice(0, sodium.crypto_secretbox_KEYBYTES); const nonce = dataFileKey.slice(sodium.crypto_secretbox_KEYBYTES); const payloadMetaBuffer = Buffer.from(canonicalStringify(finalPayloadMetadata)); const fullBufferToSign = Buffer.concat([payloadForStorage, payloadMetaBuffer]); metadataToSign.signature_meta = sodium.to_base64(sodium.crypto_sign_detached(Buffer.from(sodium.crypto_generichash(32, payloadMetaBuffer)), senderKeys.signingSecretKey)); metadataToSign.signature_full = sodium.to_base64(sodium.crypto_sign_detached(Buffer.from(sodium.crypto_generichash(32, fullBufferToSign)), senderKeys.signingSecretKey)); newMailData.encryptedMetadata = Buffer.from(sodium.crypto_secretbox_easy(JSON.stringify(metadataToSign), nonce, key)); payloadForStorage = Buffer.from(sodium.crypto_secretbox_easy(payloadForStorage, nonce, key)); newMailData.wrappedKeys = new Map(); allKeyDocs.forEach(doc => { const email = emailHashMap.get(doc.ownerSimpleHash); const wrapped = Buffer.from(sodium.crypto_box_seal(dataFileKey, sodium.from_base64(doc.publicKey))); 
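// The sender's own email is part of allEmailsForWrapping, so the sender also
// receives a sealed copy of the per-mail data key.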
newMailData.wrappedKeys.set(cryptoService.simpleHash(email, pepper), wrapped); }); } else { const payloadMetaBuffer = Buffer.from(canonicalStringify(finalPayloadMetadata)); const fullBufferToSign = Buffer.concat([payloadForStorage, payloadMetaBuffer]); metadataToSign.signature_meta = sodium.to_base64(sodium.crypto_sign_detached(Buffer.from(sodium.crypto_generichash(32, payloadMetaBuffer)), senderKeys.signingSecretKey)); metadataToSign.signature_full = sodium.to_base64(sodium.crypto_sign_detached(Buffer.from(sodium.crypto_generichash(32, fullBufferToSign)), senderKeys.signingSecretKey)); newMailData.metadata = metadataToSign; } const uploadStream = gfs.openUploadStream(finalPayloadMetadata.filename || 'payload', { session, chunkSizeBytes: GRIDFS_CHUNK_SIZE_BYTES }); Readable.from(payloadForStorage).pipe(uploadStream); await new Promise((resolve, reject) => { uploadStream.on('finish', resolve); uploadStream.on('error', reject); }); newMailData.payload = uploadStream.id; newMail = new Mail(newMailData); if (!isNaN(expiresInSeconds) && expiresInSeconds > 0) { newMail.expiresAt = new Date(Date.now() + expiresInSeconds * 1000); } await newMail.save({ session }); const finalCompressedSize = payloadForStorage.length; const uncompressedSize = req.body.length; if (uncompressedSize > 0) { const storageField = req.db.allowDrop ? 'storageUsedTest' : 'storageUsedProd'; if (!req.db.allowDrop) { const otherStorageField = 'storageUsedTest'; const updateResult = await UserKey.updateOne( { ownerSimpleHash: senderKeys.ownerSimpleHash, status: 'active', $expr: { $or: [ { $eq: ['$quotaLimit', -1] }, { $lte: [ { $add: [`$${storageField}`, `$${otherStorageField}`, uncompressedSize] }, '$quotaLimit' ] } ] } }, { $inc: { [storageField]: finalCompressedSize } }, { session } ); if (updateResult.modifiedCount === 0) { throw { statusCode: 413, message: 'Payload exceeds storage quota (atomic check failed).' }; } } else { chargeDetailsForUpdate = { owner: senderKeys.ownerSimpleHash, size: finalCompressedSize }; } } }); if (req.db.allowDrop && chargeDetailsForUpdate) { const storageField = 'storageUsedTest'; await UserKey.updateOne( { ownerSimpleHash: chargeDetailsForUpdate.owner, status: 'active' }, { $inc: { [storageField]: chargeDetailsForUpdate.size } } ); } res.status(201).json({ success: true, message: 'Mail created successfully.', id: newMail._id, expiresAt: newMail.expiresAt || null }); } catch (e) { handleApiError(res, e); } finally { if (session) await session.endSession(); } }; exports.getMailbox = async (req, res) => { try { const { models } = req.db; const { pepper, siteOwnerEmail } = req; const queryHeader = req.header('X-Query-String'); const query = queryHeader ? Object.fromEntries(new URLSearchParams(queryHeader)) : req.query; const user = req.userKeys; const siteOwnerEmailSimpleHash = siteOwnerEmail ? 
cryptoService.simpleHash(siteOwnerEmail, pepper) : null; const filter = { recipientEmailHashes: user.ownerSimpleHash }; const tagHashesFilter = []; Object.keys(query).forEach(key => { if (key.startsWith('tags.')) { const tagKey = key.substring(5); const tagValue = query[key]; if (typeof tagValue === 'string') { tagHashesFilter.push({ keyHash: cryptoService.simpleHash(tagKey, pepper), valueHash: cryptoService.simpleHash(tagValue, pepper) }); } } }); if (tagHashesFilter.length > 0) filter.tagHashes = { $all: tagHashesFilter }; const mails = await models.Mail.find(filter).sort({ _id: -1 }).lean(); const inbox = []; for (const mail of mails) { const decrypted = await decryptMailMetadata(mail, user); if (!decrypted) continue; const { metadata } = decrypted; const validationResult = await signatureService.validateSignatures(metadata, mail.isEncrypted, req.db, pepper); const validation = { status: validationResult.valStatusMeta, signer: validationResult.signerMeta }; inbox.push({ mailId: mail._id, isEncrypted: mail.isEncrypted, validation: validation, tags: metadata.tags, ownerIsSiteOwner: mail.ownerSimpleHash === siteOwnerEmailSimpleHash, signerEmail: metadata.signerEmail, signerHash: metadata.signerHash, serverTimestamp: metadata.serverTimestamp, size: metadata.payloadMetadata?.payloadSize, isCompressed: metadata.isCompressed, payloadMetadata: metadata.payloadMetadata, recipientCount: mail.recipientEmailHashes.length, expiresAt: mail.expiresAt }); } await sendCompressedJson(req, res, inbox); } catch (e) { handleApiError(res, e); } }; exports.getMailById = async (req, res) => { try { const { models, gfs } = req.db; const { pepper, siteOwnerEmail } = req; const user = req.userKeys; const siteOwnerEmailSimpleHash = siteOwnerEmail ? cryptoService.simpleHash(siteOwnerEmail, pepper) : null; const mail = await models.Mail.findById(req.params.id).lean(); if (!mail || !mail.recipientEmailHashes.includes(user.ownerSimpleHash)) { throw { statusCode: 404, message: 'Mail not found or you are not a recipient.' }; } const etag = `"${mail._id.toString()}-${mail.payload.toString()}"`; res.setHeader('ETag', etag); res.setHeader('Cache-Control', 'private, max-age=31536000, immutable'); if (req.header('if-none-match') === etag) { return res.status(304).end(); } let payloadFromDb = Buffer.alloc(0); if (mail.payload) { const chunks = []; const downloadStream = gfs.openDownloadStream(mail.payload); for await (const chunk of downloadStream) chunks.push(chunk); payloadFromDb = Buffer.concat(chunks); } let finalMetadata, payloadForValidation; const decrypted = await decryptMailMetadata(mail, user); if (!decrypted) { throw { statusCode: 403, message: 'Failed to decrypt mail metadata. Your key may have changed.' }; } finalMetadata = decrypted.metadata; if (mail.isEncrypted) { const { dataFileKey } = decrypted; const key = dataFileKey.slice(0, sodium.crypto_secretbox_KEYBYTES); const nonce = dataFileKey.slice(sodium.crypto_secretbox_KEYBYTES); if (payloadFromDb.length > 0) { const decryptedPayload = sodium.crypto_secretbox_open_easy(payloadFromDb, nonce, key); if (!decryptedPayload) throw { statusCode: 500, message: 'Failed to decrypt mail payload.' 
}; payloadForValidation = Buffer.from(decryptedPayload); } else { payloadForValidation = payloadFromDb; } } else { payloadForValidation = payloadFromDb; } const validationResult = await signatureService.validateSignatures(finalMetadata, mail.isEncrypted, req.db, pepper, payloadForValidation); const mimeType = finalMetadata.payloadMetadata?.['mime-type'] || 'application/octet-stream'; res.setHeader('Content-Type', mimeType); if (finalMetadata.payloadMetadata?.filename) { res.setHeader('Content-Disposition', contentDisposition(finalMetadata.payloadMetadata.filename)); } let payloadToSend = payloadForValidation; if (finalMetadata.isCompressed) { if (req.headers['accept-encoding']?.includes('zstd')) { res.setHeader('Content-Encoding', 'zstd'); } else { const decompressed = await zstd.decompress(payloadToSend); payloadToSend = zlib.gzipSync(decompressed); res.setHeader('Content-Encoding', 'gzip'); } } res.setHeader('Content-Length', payloadToSend.length.toString()); res.setHeader('X-Compressed', finalMetadata.isCompressed ? 'true' : 'false'); res.setHeader('X-Encrypted', String(mail.isEncrypted)); res.setHeader('X-Owner-Is-Site-Owner', String(mail.ownerSimpleHash === siteOwnerEmailSimpleHash)); res.setHeader('X-Validation-Status-Metadata', validationResult.valStatusMeta); if (validationResult.signerMeta) res.setHeader('X-Validation-Signer-Metadata', validationResult.signerMeta); res.setHeader('X-Validation-Status-Full', validationResult.valStatusFull); if (validationResult.signerFull) res.setHeader('X-Validation-Signer-Full', validationResult.signerFull); if (finalMetadata.signerEmail) res.setHeader('X-Sender-Email', finalMetadata.signerEmail); res.setHeader('X-Message-Metadata', JSON.stringify(finalMetadata.tags || {})); if (mail.expiresAt) res.setHeader('X-Expires-At', mail.expiresAt.toUTCString()); res.setHeader('X-ID', mail._id.toString()); res.setHeader('X-Payload-Metadata', JSON.stringify(finalMetadata.payloadMetadata || {})); res.status(200).send(payloadToSend); } catch (e) { handleApiError(res, e); } }; exports.deleteMail = async (req, res) => { let session; try { const { models, gfs } = req.db; const { Mail, UserKey } = models; session = await Mail.db.client.startSession(); let responseMessage = ''; let refundDetailsForUpdate = null; await session.withTransaction(async () => { const userEmailHash = req.userKeys.ownerSimpleHash; const mailId = req.params.id; const updateResult = await Mail.updateOne( { _id: mailId, recipientEmailHashes: userEmailHash }, { $pull: { recipientEmailHashes: userEmailHash }, $unset: { [`wrappedKeys.${userEmailHash}`]: '' } }, { session } ); if (updateResult.modifiedCount === 0) { const mailExists = await Mail.findById(mailId).session(session).lean(); if (!mailExists) throw { statusCode: 404, message: 'Mail not found.' }; else throw { statusCode: 403, message: 'You are not a recipient of this mail.' }; } const mail = await Mail.findById(mailId).session(session).lean(); if (mail && mail.recipientEmailHashes.length === 0) { const payloadSize = await quotaService.getPayloadSize(mail.payload, gfs, session); if (mail.payload) { const fileExists = await gfs.find({ _id: mail.payload }, { session }).limit(1).toArray(); if (fileExists.length > 0) { await gfs.delete(mail.payload, { session }); } } await Mail.deleteOne({ _id: mailId }, { session }); if (payloadSize > 0) { const storageField = req.db.allowDrop ? 
'storageUsedTest' : 'storageUsedProd'; if (!req.db.allowDrop) { await UserKey.updateOne( { ownerSimpleHash: mail.ownerSimpleHash, status: 'active' }, { $inc: { [storageField]: -payloadSize } }, { session } ); } else { refundDetailsForUpdate = { owner: mail.ownerSimpleHash, size: payloadSize }; } } responseMessage = 'Mail deleted permanently as you were the last recipient.'; } else { responseMessage = 'You have been removed from the mail recipients.'; } }); if (req.db.allowDrop && refundDetailsForUpdate) { const storageField = 'storageUsedTest'; await UserKey.updateOne( { ownerSimpleHash: refundDetailsForUpdate.owner, status: 'active' }, { $inc: { [storageField]: -refundDetailsForUpdate.size } } ); } res.status(200).json({ success: true, message: responseMessage }); } catch (e) { handleApiError(res, e); } finally { if (session) await session.endSession(); } }; exports.getFullMail = async (req, res) => { try { const { models, gfs } = req.db; const { pepper } = req; const user = req.userKeys; // Base filter ensures users can only query mail where they are a recipient. const filter = { recipientEmailHashes: user.ownerSimpleHash }; // Add tag-based filtering, similar to userdata const queryHeader = req.header('X-Query-String'); const query = queryHeader ? Object.fromEntries(new URLSearchParams(queryHeader)) : req.query; const tagHashesFilter = []; Object.keys(query).forEach(key => { if (key.startsWith('tags.')) { const tagKey = key.substring(5); const tagValue = query[key]; if (typeof tagValue === 'string') { tagHashesFilter.push({ keyHash: cryptoService.simpleHash(tagKey, pepper), valueHash: cryptoService.simpleHash(tagValue, pepper) }); } } }); if (tagHashesFilter.length > 0) { filter.tagHashes = { $all: tagHashesFilter }; } const mails = await models.Mail.find(filter).lean(); const decryptedMails = await Promise.all(mails.map(async (mailDoc) => { const decrypted = await decryptMailMetadata(mailDoc, user); if (!decrypted) return null; let payloadContent = null; if (mailDoc.payload) { const chunks = []; const downloadStream = gfs.openDownloadStream(mailDoc.payload); for await (const chunk of downloadStream) chunks.push(chunk); const payloadFromDb = Buffer.concat(chunks); if (mailDoc.isEncrypted) { const { dataFileKey } = decrypted; const key = dataFileKey.slice(0, sodium.crypto_secretbox_KEYBYTES); const nonce = dataFileKey.slice(sodium.crypto_secretbox_KEYBYTES); const decryptedPayload = sodium.crypto_secretbox_open_easy(payloadFromDb, nonce, key); if (decryptedPayload) { payloadContent = JSON.parse(Buffer.from(decryptedPayload).toString('utf-8')); } } else { payloadContent = JSON.parse(payloadFromDb.toString('utf-8')); } } return { id: mailDoc._id.toString(), sender: decrypted.metadata.signerEmail, timestamp: new Date(decrypted.metadata.serverTimestamp).getTime(), field: decrypted.metadata.tags?.field, message: payloadContent }; })); await sendCompressedJson(req, res, decryptedMails.filter(m => m !== null)); } catch (e) { handleApiError(res, e); } }; /** * A core helper to decrypt the metadata of a mail document for a given user. * This is the true reusable logic. * @param {object} mailDoc - The raw mail document from the database (.lean()). * @param {object} userKeys - The authenticated user's decrypted keys. * @returns {Promise<{metadata: object, dataFileKey: Buffer}|null>} An object with the decrypted metadata and the key for the payload, or null on failure. 
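 * @example
 * // Hedged usage sketch, matching how the handlers in this file call it:
 * // const decrypted = await decryptMailMetadata(mailDoc, req.userKeys);
 * // if (decrypted) {
 * //   const { metadata, dataFileKey } = decrypted; // dataFileKey is null for unencrypted mail
 * // }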
*/
async function decryptMailMetadata(mailDoc, userKeys) {
    if (!mailDoc.isEncrypted) {
        // For unencrypted mail, the metadata is plain and there's no dataFileKey.
        return { metadata: mailDoc.metadata, dataFileKey: null };
    }
    try {
        const wrappedKeyBinary = mailDoc.wrappedKeys[userKeys.ownerSimpleHash];
        if (!wrappedKeyBinary) return null; // User doesn't have a key for this mail
        const wrappedKey = Buffer.from(wrappedKeyBinary.buffer || wrappedKeyBinary);
        const dataFileKey = sodium.crypto_box_seal_open(wrappedKey, userKeys.publicKey, userKeys.secretKey);
        if (!dataFileKey) return null; // Failed to decrypt the main data key
        const key = dataFileKey.slice(0, sodium.crypto_secretbox_KEYBYTES);
        const nonce = dataFileKey.slice(sodium.crypto_secretbox_KEYBYTES);
        const metadataCiphertext = Buffer.from(mailDoc.encryptedMetadata.buffer || mailDoc.encryptedMetadata);
        const metadataJson = sodium.crypto_secretbox_open_easy(metadataCiphertext, nonce, key);
        if (!metadataJson) return null; // Failed to decrypt metadata
        const metadata = JSON.parse(sodium.to_string(metadataJson));
        return { metadata, dataFileKey };
    } catch (e) {
        // libsodium-wrappers throws on decryption failure instead of returning null,
        // so treat any throw here as "this user cannot decrypt this mail".
        return null;
    }
} ================================================================================= FILE: src/api/models/PendingValidation.js ================================================================================= const mongoose = require('mongoose'); const PendingValidationSchema = new mongoose.Schema( { tokenHash: { type: String, required: true, unique: true }, email: { type: String, required: true }, type: { type: String, enum: ['registration', 'password_reset'], required: true }, expiresAt: { type: Date, required: true } }, { collection: 'pendingvalidations' } ); // This TTL index will automatically delete documents after they expire
PendingValidationSchema.index({ expiresAt: 1 }, { expireAfterSeconds: 0 }); module.exports = PendingValidationSchema; ================================================================================= FILE: src/api/models/UserKey.js ================================================================================= const mongoose = require('mongoose'); const UserKeySchema = new mongoose.Schema( { ownerSimpleHash: { type: String, required: true }, ownerEmail: { type: String, sparse: true, index: true }, // For optional plaintext email storage
publicKey: { type: String, required: true, unique: true }, encryptedBoxSecretKey: { type: String }, signingPublicKey: { type: String, unique: true, sparse: true }, encryptedSignSecretKey: { type: String }, status: { type: String, enum: ['active', 'archived'], default: 'active', index: true }, // Session storage is now a formal part of the schema rather than an ad-hoc addition.
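// Each session entry pairs a client-held sessionId with 'encryptedXServerKey', which by its
// name holds an encrypted per-session copy of the xserverkey that crypto.service uses to
// unlock the user's secret keys.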
sessions: [{ _id: false, // Don't create a separate _id for each session object sessionId: { type: String, required: true, index: true }, encryptedXServerKey: { type: String, required: true }, createdAt: { type: Date, default: Date.now } }], storageUsedProd: { type: Number, default: 0 }, storageUsedTest: { type: Number, default: 0 }, quotaLimit: { type: Number, default: 10 * 1024 * 1024 } }, { collection: 'userkeys' } ); UserKeySchema.index({ ownerSimpleHash: 1 }, { unique: true, partialFilterExpression: { status: 'active' } }); module.exports = UserKeySchema; ================================================================================= FILE: src/api/models/SharedData.js ================================================================================= const mongoose = require('mongoose'); const SharedDataSchema = new mongoose.Schema( { ownerSimpleHash: { type: String, index: true, required: true }, domainHash: { type: String, required: true, index: true }, fieldHash: { type: String, required: true, index: true }, tagHashes: [ { keyHash: { type: String, required: true }, valueHash: { type: String, required: true }, _id: false } ], isEncrypted: { type: Boolean, required: true }, accessControl: { type: String, enum: ['public', 'authenticated', 'restricted'], required: true, index: true }, authorizedEmailHashes: { type: [String], sparse: true, index: true }, adminEmailHashes: { type: [String], sparse: true, index: true }, metadata: { type: Object }, payload: { type: mongoose.Schema.Types.ObjectId, ref: 'uploads.files' }, wrappedKeys: { type: Map, of: Buffer }, encryptedMetadata: { type: Buffer }, expiresAt: { type: Date } }, { collection: 'shareddata' } ); SharedDataSchema.index({ domainHash: 1, fieldHash: 1 }, { unique: true }); SharedDataSchema.index({ 'tagHashes.keyHash': 1, 'tagHashes.valueHash': 1 }); SharedDataSchema.index({ expiresAt: 1 }, { expireAfterSeconds: 0 }); module.exports = SharedDataSchema; ================================================================================= FILE: src/api/models/UserData.js ================================================================================= const mongoose = require('mongoose'); const UserDataSchema = new mongoose.Schema( { ownerSimpleHash: { type: String, index: true, required: true }, tagHashes: [ { keyHash: { type: String, required: true }, valueHash: { type: String, required: true }, _id: false } ], tags: { type: Map, of: String }, isEncrypted: { type: Boolean, default: true }, payload: { type: mongoose.Schema.Types.ObjectId, ref: 'uploads.files' }, encryptedMetadata: { type: Buffer }, metadata: { type: Object }, expiresAt: { type: Date } }, { collection: 'userdata' } ); UserDataSchema.index({ 'tagHashes.keyHash': 1, 'tagHashes.valueHash': 1 }, { sparse: true }); UserDataSchema.index({ tags: 1 }, { sparse: true }); UserDataSchema.index({ expiresAt: 1 }, { expireAfterSeconds: 0 }); module.exports = UserDataSchema; ================================================================================= FILE: src/api/models/Mail.js ================================================================================= const mongoose = require('mongoose'); const MailSchema = new mongoose.Schema( { ownerSimpleHash: { type: String, index: true, required: true }, recipientEmailHashes: [{ type: String, index: true }], tagHashes: [ { keyHash: { type: String, required: true }, valueHash: { type: String, required: true }, _id: false } ], isEncrypted: { type: Boolean, default: true, index: true }, wrappedKeys: { type: Map, of: Buffer }, 
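// wrappedKeys holds one sealed copy of the mail's dataFileKey per recipient, keyed by the
// recipient's email simple-hash (opened with crypto_box_seal_open in decryptMailMetadata).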
encryptedMetadata: { type: Buffer }, metadata: { type: Object }, payload: { type: mongoose.Schema.Types.ObjectId, ref: 'uploads.files' }, expiresAt: { type: Date } }, { collection: 'mail' } ); MailSchema.index({ 'tagHashes.keyHash': 1, 'tagHashes.valueHash': 1 }); MailSchema.index({ expiresAt: 1 }, { expireAfterSeconds: 0 }); module.exports = MailSchema; ================================================================================= FILE: src/api/models/ServerMaintenance.js ================================================================================= const mongoose = require('mongoose'); const ServerMaintenanceSchema = new mongoose.Schema( { _id: { type: String, required: true }, // e.g., 'gridfs_cleanup_lock' lock_acquired_at: { type: Date }, last_cleaned_at: { type: Date } }, { collection: 'servermaintenance' } ); module.exports = ServerMaintenanceSchema; ================================================================================= FILE: src/api/routes/misc.routes.js ================================================================================= const express = require('express'); const authenticate = require('../middleware/authenticate'); const miscController = require('../controllers/misc.controller'); const router = express.Router(); // Apply authentication middleware to all routes in this file. router.use(authenticate); // This endpoint requires a JSON body, so we add the parser here. router.post('/keys/lookup', express.json(), miscController.lookupKeys); router.get('/webrtc/turn-credentials', miscController.getTurnCredentials); module.exports = router; ================================================================================= FILE: src/api/routes/auth.routes.js ================================================================================= const express = require('express'); const authController = require('../controllers/auth.controller'); const decodeBase64Headers = require('../middleware/decodeHeaders'); const authenticate = require('../middleware/authenticate'); // --- FIX: Import the new rate limiters --- const { failedLoginLimiter, sensitiveActionLimiter } = require('../middleware/rateLimiters'); const router = express.Router(); router.use(express.json()); // --- FIX: Apply the 'sensitiveActionLimiter' to these unauthenticated, high-cost routes --- router.post('/request-registration', sensitiveActionLimiter, authController.requestRegistration); router.post('/request-password-reset', sensitiveActionLimiter, authController.requestPasswordReset); // These routes are part of the multi-step flows and are less likely to be spammed directly router.post('/complete-registration', authController.completeRegistration); router.post('/perform-password-reset', authController.performPasswordReset); // --- FIX: Apply the 'failedLoginLimiter' specifically to the login endpoint --- router.post('/sessions', failedLoginLimiter, authController.createSession); // New endpoint for script-based session validation router.get('/validate-session', authController.validateSession); // These routes are already protected by the 'authenticate' middleware router.post('/logout', authenticate, authController.logout); router.post('/logout-all', authenticate, authController.logoutAll); router.post('/change-password', authController.changePassword); module.exports = router; ================================================================================= FILE: src/api/routes/mail.routes.js ================================================================================= const express = 
require('express'); const authenticate = require('../middleware/authenticate'); const noCache = require('../middleware/noCache'); const mailController = require('../controllers/mail.controller'); const router = express.Router(); const rawBodyParser = express.raw({ type: '*/*', limit: '16mb' }); // Apply the 'authenticate' middleware to ALL /mail routes. // No part of the mail module is public. router.use(authenticate); // --- Mail Endpoints --- router.post('/', rawBodyParser, mailController.createMail); router.get('/', noCache, mailController.getMailbox); router.get('/inbox', noCache, mailController.getFullMail); router.get('/:id', mailController.getMailById); router.delete('/:id', mailController.deleteMail); module.exports = router; ================================================================================= FILE: src/api/routes/index.js ================================================================================= const express = require('express'); const authRoutes = require('./auth.routes'); const userdataRoutes = require('./userdata.routes'); const shareddataRoutes = require('./shareddata.routes'); const mailRoutes = require('./mail.routes'); const miscRoutes = require('./misc.routes'); const storageRoutes = require('./storage.routes'); const decodeBase64Headers = require('../middleware/decodeHeaders'); const router = express.Router(); router.use(decodeBase64Headers); // Mount all modular routers router.use('/auth', authRoutes); router.use('/userdata', userdataRoutes); router.use('/shareddata', shareddataRoutes); router.use('/mail', mailRoutes); router.use('/storage', storageRoutes); router.use('/', miscRoutes); // Placeholder for the root of the API router.get('/', (req, res) => { res.json({ message: 'HaskellAI API is running.' }); }); module.exports = router; ================================================================================= FILE: src/api/routes/storage.routes.js ================================================================================= const express = require('express'); const authenticate = require('../middleware/authenticate'); const noCache = require('../middleware/noCache'); const storageController = require('../controllers/storage.controller'); const router = express.Router(); // All storage routes require authentication. 
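// Illustrative client call (a sketch only; the '/api' mount prefix and the JSON response
// shape are assumptions, since server.js mounting and the controller are defined elsewhere):
//
//   const res = await fetch('/api/storage/quota', { credentials: 'include' });
//   const quota = await res.json(); // e.g. current usage and limit fields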
router.use(authenticate); router.get('/quota', noCache, storageController.getQuota); module.exports = router; ================================================================================= FILE: src/api/routes/shareddata.routes.js ================================================================================= const express = require('express'); const authenticate = require('../middleware/authenticate'); const noCache = require('../middleware/noCache'); const shareddataController = require('../controllers/shareddata.controller'); const router = express.Router(); const rawBodyParser = express.raw({ type: '*/*', limit: '16mb' }); router.post('/', authenticate, rawBodyParser, shareddataController.createSharedData); router.get('/', noCache, shareddataController.getSharedData); router.get('/payload', noCache, shareddataController.getSharedDataPayloadByQuery); router.get('/:id/payload', shareddataController.getSharedDataPayloadById); router.delete('/', authenticate, shareddataController.deleteSharedDataByQuery); router.delete('/:id', authenticate, shareddataController.deleteSharedDataById); module.exports = router; ================================================================================= FILE: src/api/routes/userdata.routes.js ================================================================================= const express = require('express'); const authenticate = require('../middleware/authenticate'); const noCache = require('../middleware/noCache'); const userdataController = require('../controllers/userdata.controller'); const router = express.Router(); // Define body parsers needed for specific routes const rawBodyParser = express.raw({ type: '*/*', limit: '16mb' }); // Apply the new 'authenticate' middleware to ALL /userdata routes. // This single line replaces all the old header-checking logic. router.use(authenticate); // --- UserData Endpoints --- router.post('/', rawBodyParser, userdataController.createUserData); router.get('/', noCache, userdataController.getUserData); router.get('/payload', noCache, userdataController.getUserDataByQuery); router.get('/:id/payload', userdataController.getUserDataById); router.delete('/', userdataController.deleteUserDataByQuery); router.delete('/:id', userdataController.deleteUserDataById); module.exports = router; ================================================================================= FILE: src/services/email.service.js ================================================================================= const fs = require('fs/promises'); const path = require('path'); const nodemailer = require('nodemailer'); // --- Configuration --- const FROM_EMAIL = "noreply@haskellai.com"; const API_URL = 'https://api.sendgrid.com/v3/mail/send'; const SENDER_NAME = 'HaskellAI'; const CONTACT_EMAIL = 'cgayler@haskell.edu'; const WEBSITE_URL = 'https://haskellai.com'; const SETTINGS_DIR = path.join(process.cwd(), 'settings'); // --- Transporter Variables --- let sendGridApiKey = null; let directTransporter = null; async function init() { // --- Primary Method: Try to initialize SendGrid --- try { const keyPath = path.join(SETTINGS_DIR, 'sendgridkey.txt'); sendGridApiKey = (await fs.readFile(keyPath, 'utf8')).trim(); console.log('SendGrid API key loaded. SendGrid will be the primary email transport.'); } catch (error) { console.log('No sendgridkey.txt found. 
SendGrid transport will be skipped.'); } // --- Fallback Method: Try to initialize Nodemailer Direct Transport --- try { const dkimKeyPath = path.join(SETTINGS_DIR, 'mail.private.txt'); const privateKey = await fs.readFile(dkimKeyPath, 'utf8'); directTransporter = nodemailer.createTransport({ host: 'haskellai.com', // Your mail server's FQDN port: 587, // Standard submission port secure: false, // Use STARTTLS dkim: { domainName: 'haskellai.com', keySelector: 'mail', // The selector you used when generating the key privateKey: privateKey, }, }); // Verify the connection and configuration await directTransporter.verify(); console.log('Nodemailer direct transport configured successfully. This will be used as a fallback or primary if SendGrid is unavailable.'); } catch (error) { console.error('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'); console.error('!!! WARNING: Nodemailer direct transport setup failed. !!!'); console.error(`!!! Reason: ${error.message}`); console.error('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'); } if (!sendGridApiKey && !directTransporter) { console.error('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'); console.error('!!! CRITICAL ERROR: No email transports configured. !!!'); console.error('!!! Email services will NOT be functional. !!!'); console.error('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'); } } // --- Email Templates (Hardcoded from your script) --- const HTML_LOGO_SVG_BASE64 = 'CjxzdmcgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB2aWV3Qm94PSIwIDAgNDAwIDQwMCI+Cgk8IS0tIEJsYWNrIGJvcmRlciBmb3IgYW50ZW5uYXMgLS0+Cgk8cGF0aCBkPSJNIDUzIDE2MyBRIDAgNDggNTAgMjAiIHN0cm9rZT0iYmxhY2siIGZpbGw9Im5vbmUiIHN0cm9rZS13aWR0aD0iMjIiLz4KCTxwYXRoIGQ9Ik0gMzQ3IDE2MyBRIDQwMCA0OCAzNTAgMjAiIHN0cm9rZT0iYmxhY2siIGZpbGw9Im5vbmUiIHN0cm9rZS13aWR0aD0iMjIiLz4KCTwhLS0gQW50ZW5uYSBCYWxscyAtLT4KCTxjaXJjbGUgY3g9IjUwIiBjeT0iMjAiIHI9IjE1IiBmaWxsPSIjZDBkMGQwIiBzdHJva2U9ImJsYWNrIiBzdHJva2Utd2lkdGg9IjYiLz4KCTxjaXJjbGUgY3g9IjM1MCIgY3k9IjIwIiByPSIxNSIgZmlsbD0iI2QwZDBkMCIgc3Ryb2tlPSJibGFjayIgc3Ryb2tlLXdpZHRoPSI2IiV+Cgk8IS0tIFJvYm90IEVhcnMgLS0+Cgk8Y2lyY2xlIGN4PSI0MCIgY3k9IjI1MyIgcj0iMzAiIGZpbGw9IiNiMGIwYjAiIHN0cm9rZT0iYmxhY2siIHN0cm9rZS13aWR0aD0iOCIvPgoJPGNpcmNsZSBjeD0iMzYwIiBjeT0iMjUzIiByPSIzMCIgZmlsbD0iI2IwYjBiMCIgc3Ryb2tlPSJibGFjayIgc3Ryb2tlLXdpZHRoPSI4Ii8+Cgk8IS0tIFJvYm90IEhlYWQgLS0+Cgk8cmVjdCB4PSI0MCIgeT0iMTA4IiB3aWR0aD0iMzIwIiBoZWlnaHQ9IjI4OCIgcng9IjUwIiByeT0iNTAiIGZpbGw9IiNkMGQwZDAiIHN0cm9rZT0iYmxhY2siIHN0cm9rZS13aWR0aD0iOCIvPgoJPCEtLSBBbnRlbm5hcyAtLT4KCTxwYXRoIGQ9Ik0gNTMgMTYzIFEgMCA0OCA1MCAyMCIgc3Ryb2tlPSIjZDBkMGQwIiBmaWxsPSJub25lIiBzdHJva2Utd2lkdGg9IjEwIi8+Cgk8cGF0aCBkPSJNIDM0NyAxNjMgUSA0MDAgNDggMzUwIDIwIiBzdHJva2U9IiNkMGQwZDAiIGZpbGw9Im5vbmUiIHN0cm9rZS13aWR0aD0iMTAiLz4KCTwhLS0gUm9ib3QgRXllcyAtLT4KCTxjaXJjbGUgY3g9IjE0MCIgY3k9IjIyOCIgcj0iMzkuMDYyNSIgZmlsbD0id2hpdGUiIHN0cm9rZT0iYmxhY2siIHN0cm9rZS13aWR0aD0iOCIvPgoJPGNpcmNsZSBjeD0iMjYwIiBjeT0iMjI4IiByPSIzOS4wNjI1IiBmaWxsPSJ3aGl0ZSIgc3Ryb2tlPSJibGFjayIgc3Ryb2tlLXdpZHRoPSI4Ii8+Cgk8IS0tIEV5ZSBwdXBpbHMgLS0+Cgk8Y2lyY2xlIGN4PSIxNDAiIGN5PSIyMjgiIHI9IjEwIiBmaWxsPSJibGFjayIvPgoJPGNpcmNsZSBjeD0iMjYwIiBjeT0iMjI4IiByPSIxMCIgZmlsbD0iYmxhY2siLz4KCTwhLS0gQmxhY2sgYm9yZGVyIGZvciBtb3V0aCAtLT4KCTxwYXRoIGQ9Ik0gMTIwIDMyOCBRIDE3Ny41IDM0OCAyMjcuNSAzMjggVCAyODAgMzM3IiBzdHJva2U9ImJsYWNrIiBmaWxsPSJub25lIiBzdHJva2Utd2lkdGg9IjM4Ii8+Cgk8IS0tIFJvYm90IE1vdXRoIC0tPgoJPHBhdGggZD0iTSAxMjggMzI4IFEgMTc3LjUgMzQ4IDIyNy41IDMyOCBUIDI3NyAzMjgiIHN0cm9rZT0id2hpdGUiIGZpbGw9Im5vbmUiIHN0cm9rZS13aWR0aD0iMjAiLz4KPC9zdmc+Cg=='; function 
getHtmlBody(title, text, link, buttonText) {
    // Minimal HTML layout reconstructed around the surviving template text; the exact
    // original markup and inline styling are assumptions.
    return `
<div style="font-family: sans-serif; max-width: 600px; margin: 0 auto; text-align: center;">
    <img src="data:image/svg+xml;base64,${HTML_LOGO_SVG_BASE64}" alt="HaskellAI Logo" width="120" height="120"/>
    <h1>${title}</h1>
    <p>${text}</p>
    <p><a href="${link}" style="display: inline-block; padding: 12px 24px; background: #1a73e8; color: #ffffff; text-decoration: none; border-radius: 4px;">${buttonText}</a></p>
    <p>If you did not request this, please ignore this email or contact us at ${CONTACT_EMAIL} for support.</p>
    <p>Learn more about HaskellAI and our tools by visiting our website: <a href="${WEBSITE_URL}">haskellai.com</a>.</p>
</div>
`; } function getPlainTextBody(text, link) { return `Welcome to HaskellAI!\n\n ${text}\n ${link}\n\n If you did not request this, please ignore this email or contact us at ${CONTACT_EMAIL} for support.\n Learn more about HaskellAI and our tools by visiting our website: ${WEBSITE_URL}\n`; } async function sendEmail({ to, subject, htmlBody, plainTextBody }) { // --- Primary Method: SendGrid --- if (sendGridApiKey) { try { const payload = { personalizations: [{ to: [{ email: to }] }], from: { name: SENDER_NAME, email: FROM_EMAIL }, subject: subject, content: [ { type: 'text/plain', value: plainTextBody }, { type: 'text/html', value: htmlBody } ] }; const response = await fetch(API_URL, { method: 'POST', headers: { 'Authorization': `Bearer ${sendGridApiKey}`, 'Content-Type': 'application/json' }, body: JSON.stringify(payload) }); if (!response.ok) { const errorBody = await response.json(); throw new Error(`SendGrid API Error: ${response.status} ${response.statusText} - ${JSON.stringify(errorBody)}`); } console.log(`Email sent successfully to ${to} via SendGrid.`); return; // Success, exit the function } catch (error) { console.error(`SendGrid failed: ${error.message}. Attempting fallback...`); // Do not re-throw; proceed to the fallback method below. } } // --- Fallback Method: Nodemailer Direct Transport --- if (directTransporter) { try { const mailOptions = { from: `"${SENDER_NAME}" <${FROM_EMAIL}>`, to: to, subject: subject, text: plainTextBody, html: htmlBody, }; await directTransporter.sendMail(mailOptions); console.log(`Email sent successfully to ${to} via direct transport (Nodemailer).`); } catch (error) { console.error(`Fallback email transport also failed for ${to}:`, error); throw new Error('Failed to send email via all available transports.'); } } else { // This block is reached if SendGrid failed and Nodemailer was never configured. throw new Error('Failed to send email: SendGrid failed and no fallback transport is configured.'); } } async function sendRegistrationEmail(email, token) { const subject = 'Verify Your Email with HaskellAI'; const params = new URLSearchParams({ action: 'register', email: email, token: token }); const link = `https://haskellai.com/?LandingPage&${params.toString()}`; const text = 'Complete your registration to access HaskellAI, our comprehensive AI platform available to all Haskell Indian Nations University students and staff. Please verify your email address by clicking the link below:'; await sendEmail({ to: email, subject, htmlBody: getHtmlBody('Welcome to HaskellAI!', text, link, 'Verify Email'), plainTextBody: getPlainTextBody(text, link) }); } async function sendResetPasswordEmail(email, token) { const subject = 'Your HaskellAI Password Reset Request'; const params = new URLSearchParams({ action: 'reset', email: email, token: token }); const link = `https://haskellai.com/?LandingPage&${params.toString()}`; const text = 'You have requested to reset your password for HaskellAI. Please click the link below to proceed. 
This link will expire in 15 minutes.'; await sendEmail({ to: email, subject, htmlBody: getHtmlBody('Reset Your Password', text, link, 'Reset Password'), plainTextBody: getPlainTextBody(text, link) }); } module.exports = { init, sendRegistrationEmail, sendResetPasswordEmail, }; ================================================================================= FILE: src/services/crypto.service.js ================================================================================= const sodium = require('libsodium-wrappers'); function simpleHash(data, pepper) { return sodium.to_base64(sodium.crypto_generichash(32, data, pepper)); } function keyedHash(data, key) { return sodium.to_base64(sodium.crypto_auth(data, key)); } function encryptSecretKey(secretKey, xserverkey) { const nonce = sodium.randombytes_buf(sodium.crypto_secretbox_NONCEBYTES); const ciphertext = sodium.crypto_secretbox_easy(secretKey, nonce, xserverkey); const nonce_b64 = sodium.to_base64(nonce); const ciphertext_b64 = sodium.to_base64(ciphertext); return `${nonce_b64}:${ciphertext_b64}`; } function decryptSecretKey(encryptedData, xserverkey) { try { const [nonce_b64, ciphertext_b64] = encryptedData.split(':'); const nonce = sodium.from_base64(nonce_b64); const ciphertext = sodium.from_base64(ciphertext_b64); const decrypted = sodium.crypto_secretbox_open_easy(ciphertext, nonce, xserverkey); return decrypted ? Buffer.from(decrypted) : null; } catch (e) { // This will now only fail for a genuinely bad key or corrupted data. return null; } } /** * Decrypts a user's master keys using their xserverkey. * Encapsulates the core decryption logic for reuse. * @param {object} userDoc - The Mongoose user document. * @param {Buffer} xserverkey - The plaintext xserverkey for decryption. * @returns {object|null} The full userKeys object or null on failure. */ function decryptUserKeys(userDoc, xserverkey) { try { const boxSecretKey = decryptSecretKey(userDoc.encryptedBoxSecretKey, xserverkey); const signSecretKey = decryptSecretKey(userDoc.encryptedSignSecretKey, xserverkey); if (!boxSecretKey || !signSecretKey) { return null; } return { ownerSimpleHash: userDoc.ownerSimpleHash, publicKey: sodium.from_base64(userDoc.publicKey), secretKey: boxSecretKey, signingPublicKey: sodium.from_base64(userDoc.signingPublicKey), signingSecretKey: signSecretKey }; } catch (error) { console.error("Error during user key decryption:", error); return null; } } module.exports = { simpleHash, keyedHash, encryptSecretKey, decryptSecretKey, decryptUserKeys, }; ================================================================================= FILE: src/services/token.service.js ================================================================================= const crypto = require('crypto'); const sodium = require('libsodium-wrappers'); /** * Generates a secure random token and its libsodium-based hash. * @returns {{token: string, tokenHash: string}} The raw token (hex) and its hash (hex). */ function generateToken() { // We can still use crypto.randomBytes as it's a standard, secure source of randomness. const token = crypto.randomBytes(32).toString('hex'); // Hash the token using libsodium's generic hash (BLAKE2b) and encode as hex. const tokenHash = sodium.to_hex(sodium.crypto_generichash(32, token)); return { token, tokenHash }; } /** * Hashes a given token using the libsodium-based project-standard method. * @param {string} token The plaintext token from the user. * @returns {string} The resulting hash, encoded as a hex string. 
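 * @example
 * // Round trip (sketch): only tokenHash is persisted; the raw token travels to the user
 * // and is re-hashed on return for lookup.
 * const { token, tokenHash } = generateToken();
 * hashToken(token) === tokenHash; // => true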
*/ function hashToken(token) { return sodium.to_hex(sodium.crypto_generichash(32, token)); } module.exports = { generateToken, hashToken, }; ================================================================================= FILE: src/services/quota.service.js ================================================================================= const fs = require('fs'); const path = require('path'); const SETTINGS_DIR = path.join(process.cwd(), 'settings'); const DEFAULT_GLOBAL_LIMIT = 10 * 1024 * 1024; // 10 MB /** * Resolves a user's quota by checking email-specific, then domain-specific, * then global limits from the configuration files. * @param {string} email The user's plaintext email. * @param {object} req The Express request object containing the pre-loaded quota maps. * @returns {number} The user's storage quota in bytes. -1 means unlimited. */ function resolveUserQuota(email, req) { const lowercasedEmail = email.toLowerCase(); // 1. Check for a specific email limit if (req.emailLimits && req.emailLimits.has(lowercasedEmail)) { return req.emailLimits.get(lowercasedEmail); } // 2. Check for a domain-specific limit const domain = lowercasedEmail.substring(lowercasedEmail.lastIndexOf("@") + 1); if (req.domainLimits && req.domainLimits.has(domain)) { return req.domainLimits.get(domain); } // 3. Return the global limit return req.globalLimit; } /** * Performs a pre-flight check to see if a user can store a payload of a given size. * @param {object} user The authenticated user object from req.user. * @param {number} newPayloadSize The size of the incoming payload in bytes. * @param {number} [oldPayloadSize=0] The size of the payload being replaced (for updates). */ function checkQuota(user, newPayloadSize, oldPayloadSize = 0) { // Allow metadata-only operations (no payload) if (newPayloadSize === 0) { return; } const limit = user.quotaLimit; // -1 signifies an unlimited quota if (limit === -1) { return; } const currentUsage = (user.storageUsedProd || 0) + (user.storageUsedTest || 0); const projectedUsage = currentUsage - oldPayloadSize + newPayloadSize; if (projectedUsage > limit) { throw { statusCode: 413, message: `Payload exceeds storage quota. Limit: ${limit} bytes, Current Usage: ${currentUsage} bytes, Projected Usage: ${projectedUsage} bytes.` }; } } /** * Helper function to get the size of a payload from GridFS. * @param {mongoose.Types.ObjectId} payloadId The ID of the file in GridFS. * @param {object} gfs The GridFSBucket instance. * @param {object} [session] The optional MongoDB session for transactional reads. * @returns {Promise} The size of the file in bytes, or 0 if not found. */ async function getPayloadSize(payloadId, gfs, session = null) { if (!payloadId) return 0; // Pass the session to the find operation to make it transaction-aware. const files = await gfs.find({ _id: payloadId }, { session }).limit(1).toArray(); return files.length > 0 ? files[0].length : 0; } module.exports = { resolveUserQuota, checkQuota, getPayloadSize }; ================================================================================= FILE: src/services/maintenance.service.js ================================================================================= const fs = require('fs'); const path = require('path'); const SETTINGS_DIR = path.join(process.cwd(), 'settings'); // This is a self-contained function to load quota rules, as the maintenance service // does not have access to the Express `req` object. 
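// Illustrative file contents (an assumption consistent with the parser below, which splits
// each line on whitespace and takes an optional byte limit as the second column):
//
//   allowedemails.txt:
//     student@haskell.edu    52428800
//     staff@haskell.edu      -1
//
//   globallimit.txt:
//     10485760
//
// A limit of -1 is treated as unlimited by quota.service.checkQuota.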
function loadQuotaConfigForMaintenance() { const config = { emailLimits: new Map(), domainLimits: new Map(), globalLimit: 10 * 1024 * 1024 // 10 MB default }; try { const parseFile = (content) => { const map = new Map(); content.split('\n').forEach(line => { const parts = line.trim().toLowerCase().split(/\s+/); if (parts[0]) { const limit = parts.length > 1 ? parseInt(parts[1], 10) : null; if (!isNaN(limit) && limit !== null) map.set(parts[0], limit); } }); return map; }; const globalLimitPath = path.join(SETTINGS_DIR, 'globallimit.txt'); if (fs.existsSync(globalLimitPath)) { const limitFromFile = parseInt(fs.readFileSync(globalLimitPath, 'utf8').trim(), 10); if (!isNaN(limitFromFile)) config.globalLimit = limitFromFile; } const emailsPath = path.join(SETTINGS_DIR, 'allowedemails.txt'); if (fs.existsSync(emailsPath)) { config.emailLimits = parseFile(fs.readFileSync(emailsPath, 'utf8')); } const domainsPath = path.join(SETTINGS_DIR, 'alloweddomains.txt'); if (fs.existsSync(domainsPath)) { config.domainLimits = parseFile(fs.readFileSync(domainsPath, 'utf8')); } } catch (e) { console.error("MAINTENANCE: Could not load quota config files.", e); } return config; } function resolveQuotaForMaintenance(email, config) { if (!email) return config.globalLimit; const lowercasedEmail = email.toLowerCase(); if (config.emailLimits.has(lowercasedEmail)) return config.emailLimits.get(lowercasedEmail); const domain = lowercasedEmail.substring(lowercasedEmail.lastIndexOf("@") + 1); if (config.domainLimits.has(domain)) return config.domainLimits.get(domain); return config.globalLimit; } const calculateUsageForDb = async (db, ownerSimpleHash) => { let total = 0; const collections = [db.models.UserData, db.models.SharedData, db.models.Mail]; for (const model of collections) { const results = await model.aggregate([ { $match: { ownerSimpleHash: ownerSimpleHash, payload: { $exists: true, $ne: null } } }, { $lookup: { from: 'uploads.files', localField: 'payload', foreignField: '_id', as: 'payloadDoc' } }, { $unwind: '$payloadDoc' }, { $group: { _id: null, totalSize: { $sum: '$payloadDoc.length' } } } ]); if (results.length > 0) { total += results[0].totalSize; } } return total; }; const bootstrapStorageQuotas = async (prodDbContext, testDbContext) => { console.log('[QUOTA_BOOTSTRAP] Starting one-time quota calculation for existing users...'); const { UserKey } = prodDbContext.models; const usersToBootstrap = await UserKey.find({ quotaLimit: { $exists: false } }); if (usersToBootstrap.length === 0) { console.log('[QUOTA_BOOTSTRAP] No users found needing bootstrapping. System is up to date.'); return; } console.log(`[QUOTA_BOOTSTRAP] Found ${usersToBootstrap.length} users to process.`); const quotaConfig = loadQuotaConfigForMaintenance(); for (const user of usersToBootstrap) { try { const [prodUsage, testUsage] = await Promise.all([ calculateUsageForDb(prodDbContext, user.ownerSimpleHash), calculateUsageForDb(testDbContext, user.ownerSimpleHash) ]); const quota = resolveQuotaForMaintenance(user.ownerEmail, quotaConfig); await UserKey.updateOne( { _id: user._id }, { $set: { storageUsedProd: prodUsage, storageUsedTest: testUsage, quotaLimit: quota }} ); console.log(`[QUOTA_BOOTSTRAP] User ${user.ownerSimpleHash} updated. 
Prod: ${prodUsage}, Test: ${testUsage}, Quota: ${quota}`); } catch (e) { console.error(`[QUOTA_BOOTSTRAP] Failed to process user ${user.ownerSimpleHash}:`, e); } } console.log('[QUOTA_BOOTSTRAP] Bootstrap process complete.'); }; const recalculateTestStorageUsage = async (prodDbContext, testDbContext) => { const LOCK_ID = 'test_storage_recalc_lock'; const ONE_HOUR_MS = 60 * 60 * 1000; const { ServerMaintenance, UserKey } = prodDbContext.models; // Lock lives on the prod database
try { const lockAcquired = await ServerMaintenance.findOneAndUpdate( { _id: LOCK_ID, $or: [{ lock_acquired_at: null }, { lock_acquired_at: { $lt: new Date(Date.now() - ONE_HOUR_MS) } }] }, { $set: { lock_acquired_at: new Date() } }, { upsert: true, new: true } // 'new: true' returns the post-update document, so the first (upserting) run is not mistaken for a failed lock acquisition
); if (!lockAcquired) { console.log('[QUOTA_RECALC] Recalculation lock held by another process. Skipping.'); return; } console.log('[QUOTA_RECALC] Starting daily reconciliation of test storage usage...'); const allUsers = await UserKey.find({}, { ownerSimpleHash: 1 }).lean(); for (const user of allUsers) { const testUsage = await calculateUsageForDb(testDbContext, user.ownerSimpleHash); await UserKey.updateOne({ _id: user._id }, { $set: { storageUsedTest: testUsage } }); } console.log(`[QUOTA_RECALC] Reconciliation complete for ${allUsers.length} users.`); await ServerMaintenance.updateOne({ _id: LOCK_ID }, { $set: { last_cleaned_at: new Date() }, $unset: { lock_acquired_at: '' } }); } catch (err) { console.error('[QUOTA_RECALC] Error during test storage reconciliation:', err); await ServerMaintenance.updateOne({ _id: LOCK_ID }, { $unset: { lock_acquired_at: '' } }); } }; const cleanOrphanedGridFSFiles = async (db) => { const LOCK_ID = 'gridfs_cleanup_lock'; const TWENTY_FOUR_HOURS_MS = 24 * 60 * 60 * 1000; const ONE_HOUR_MS = 60 * 60 * 1000; const dbName = db.conn.name; console.log(`[${dbName}] Checking if GridFS cleanup is needed...`); try { const { ServerMaintenance, UserData, SharedData, Mail } = db.models; const { gfs } = db; const maintenanceDoc = await ServerMaintenance.findById(LOCK_ID); if (maintenanceDoc && maintenanceDoc.last_cleaned_at && (new Date() - maintenanceDoc.last_cleaned_at) < TWENTY_FOUR_HOURS_MS) { console.log(`[${dbName}] GridFS cleanup not required yet (last run was within 24 hours).`); return; } const lockAcquired = await ServerMaintenance.findOneAndUpdate( { _id: LOCK_ID, $or: [ { lock_acquired_at: { $exists: false } }, { lock_acquired_at: null }, { lock_acquired_at: { $lt: new Date(Date.now() - ONE_HOUR_MS) } } // Break locks older than 1 hour
] }, { $set: { lock_acquired_at: new Date() } }, { upsert: true, new: true } ); if (!lockAcquired) { console.log(`[${dbName}] GridFS cleanup lock is held by another process. Skipping.`); return; } console.log(`[${dbName}] GridFS cleanup lock acquired. 
Starting cleanup...`); let deletedCount = 0; const activePayloads = new Set(); // Gather all active payload IDs from all relevant collections const [userDataPayloads, sharedDataPayloads, mailPayloads] = await Promise.all([ UserData.find({}, { payload: 1, _id: 0 }).lean(), SharedData.find({}, { payload: 1, _id: 0 }).lean(), Mail.find({}, { payload: 1, _id: 0 }).lean() ]); userDataPayloads.forEach(doc => doc.payload && activePayloads.add(doc.payload.toString())); sharedDataPayloads.forEach(doc => doc.payload && activePayloads.add(doc.payload.toString())); mailPayloads.forEach(doc => doc.payload && activePayloads.add(doc.payload.toString())); const gfsFiles = gfs.find(); for await (const file of gfsFiles) { if (!activePayloads.has(file._id.toString())) { try { await gfs.delete(file._id); deletedCount++; console.log(`[${dbName}] Deleted orphaned GridFS file: ${file.filename} (ID: ${file._id})`); } catch (err) { console.error(`[${dbName}] Failed to delete orphaned file ${file._id}:`, err); } } } console.log(`[${dbName}] GridFS cleanup complete. Deleted ${deletedCount} orphaned files.`); await ServerMaintenance.updateOne({ _id: LOCK_ID }, { $set: { last_cleaned_at: new Date() }, $unset: { lock_acquired_at: '' } }); } catch (err) { console.error(`[${dbName}] An error occurred during GridFS cleanup:`, err); // Attempt to release the lock even if an error occurred await db.models.ServerMaintenance.updateOne({ _id: LOCK_ID }, { $unset: { lock_acquired_at: '' } }); } }; module.exports = { cleanOrphanedGridFSFiles, bootstrapStorageQuotas, recalculateTestStorageUsage }; ================================================================================= FILE: src/services/signature.service.js ================================================================================= const sodium = require('libsodium-wrappers'); const cryptoService = require('./crypto.service'); const { canonicalStringify } = require('../utils/api.utils'); /** * Validates the digital signatures on a metadata object against a payload. * @param {object} metadata The metadata object containing signatures and sender info. * @param {boolean} isEncrypted Whether the parent document is encrypted. * @param {object} db The database context object. * @param {string} pepper The application-wide pepper. * @param {Buffer} [payloadBuffer] The compressed payload buffer, required for full validation. * @returns {Promise} An object with validation statuses and signer identities. 
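 * @example
 * // Mirrors the call in mail.controller's getMailById:
 * const v = await validateSignatures(finalMetadata, mail.isEncrypted, req.db, pepper, payloadForValidation);
 * // v.valStatusMeta / v.valStatusFull are each 'unsigned', 'invalid', 'expired', or 'valid'.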
*/ async function validateSignatures(metadata, isEncrypted, db, pepper, payloadBuffer = null) { let result = { valStatusMeta: 'unsigned', valStatusFull: 'unsigned', signerMeta: null, signerFull: null }; if (!metadata.signature_meta || !metadata.signingPublicKey) return result; result.valStatusMeta = 'invalid'; result.valStatusFull = 'invalid'; let signerEmailHash; if (isEncrypted && metadata.signerEmail) { signerEmailHash = cryptoService.simpleHash(metadata.signerEmail, pepper); } else if (!isEncrypted && metadata.signerHash) { signerEmailHash = metadata.signerHash; } if (!signerEmailHash) return result; const payloadMetaBuffer = Buffer.from(canonicalStringify(metadata.payloadMetadata || {})); const hashMeta = Buffer.from(sodium.crypto_generichash(32, payloadMetaBuffer)); let hashFull = null; if (payloadBuffer && metadata.signature_full) { const fullBuffer = Buffer.concat([payloadBuffer, payloadMetaBuffer]); hashFull = Buffer.from(sodium.crypto_generichash(32, fullBuffer)); } const senderKeyDocs = await db.models.UserKey.find({ ownerSimpleHash: signerEmailHash }).sort({ _id: -1 }); for (const keyDoc of senderKeyDocs) { if (!keyDoc.signingPublicKey) continue; const senderSigningPublicKey = sodium.from_base64(keyDoc.signingPublicKey); const signerId = metadata.signerEmail ? metadata.signerEmail : keyDoc.signingPublicKey; // Check metadata signature only if we haven't found a valid one yet. if (result.valStatusMeta !== 'valid') { if (sodium.crypto_sign_verify_detached(sodium.from_base64(metadata.signature_meta), hashMeta, senderSigningPublicKey)) { result.valStatusMeta = keyDoc.status === 'active' ? 'valid' : 'expired'; result.signerMeta = signerId; } } // Check full payload signature only if we haven't found a valid one yet. if (hashFull && result.valStatusFull !== 'valid') { if (sodium.crypto_sign_verify_detached(sodium.from_base64(metadata.signature_full), hashFull, senderSigningPublicKey)) { result.valStatusFull = keyDoc.status === 'active' ? 'valid' : 'expired'; result.signerFull = signerId; } } // Stop searching ONLY if we have found 'valid' signatures for everything we need to check. const metaDone = result.valStatusMeta === 'valid'; const fullDone = !hashFull || result.valStatusFull === 'valid'; if (metaDone && fullDone) { break; } } return result; } module.exports = { validateSignatures };
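/*
 * Client-side signing sketch (illustrative; the variable names below are assumptions, but
 * the hashing and signing structure mirrors validateSignatures above): signatures are
 * detached Ed25519 signatures over 32-byte BLAKE2b hashes of the canonicalized metadata,
 * and optionally of payload-plus-metadata.
 *
 *   const payloadMetaBuffer = Buffer.from(canonicalStringify(payloadMetadata || {}));
 *   const hashMeta = sodium.crypto_generichash(32, payloadMetaBuffer);
 *   metadata.signature_meta = sodium.to_base64(sodium.crypto_sign_detached(hashMeta, signingSecretKey));
 *
 *   const fullBuffer = Buffer.concat([payloadBuffer, payloadMetaBuffer]);
 *   const hashFull = sodium.crypto_generichash(32, fullBuffer);
 *   metadata.signature_full = sodium.to_base64(sodium.crypto_sign_detached(hashFull, signingSecretKey));
 */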