mirror of
https://github.com/LogicLabs-OU/OpenArchiver.git
synced 2026-04-06 00:31:57 +02:00
Compare commits
17 Commits
v0.4.2
...
v0.5.1-dev
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
eb72139537 | ||
|
|
f77435d22c | ||
|
|
b37736a23c | ||
|
|
4d405f096f | ||
|
|
70c62f81f7 | ||
|
|
b0f190595c | ||
|
|
970f28cc11 | ||
|
|
d99494e030 | ||
|
|
e5e119528f | ||
|
|
20ef9a42ae | ||
|
|
81b87b4b7e | ||
|
|
b5f95760f4 | ||
|
|
85000ad82b | ||
|
|
c5672d0f81 | ||
|
|
9b303c963e | ||
|
|
9228f64221 | ||
|
|
481a5ce6f9 |
29
.env.example
29
.env.example
@@ -13,6 +13,8 @@ ORIGIN=$APP_URL
|
||||
SYNC_FREQUENCY='* * * * *'
|
||||
# Set to 'true' to include Junk and Trash folders in the email archive. Defaults to false.
|
||||
ALL_INCLUSIVE_ARCHIVE=false
|
||||
# Number of mailbox jobs that run concurrently in the ingestion worker. Increase on servers with more RAM.
|
||||
INGESTION_WORKER_CONCURRENCY=5
|
||||
|
||||
# --- Docker Compose Service Configuration ---
|
||||
# These variables are used by docker-compose.yml to configure the services. Leave them unchanged if you use Docker services for Postgresql, Valkey (Redis) and Meilisearch. If you decide to use your own instances of these services, you can substitute them with your own connection credentials.
|
||||
@@ -43,7 +45,11 @@ REDIS_USER=notdefaultuser
|
||||
# --- Storage Settings ---
|
||||
# Choose your storage backend. Valid options are 'local' or 's3'.
|
||||
STORAGE_TYPE=local
|
||||
# The maximum request body size to accept in bytes including while streaming. The body size can also be specified with a unit suffix for kilobytes (K), megabytes (M), or gigabytes (G). For example, 512K or 1M. Defaults to 512kb. Or the value of Infinity if you don't want any upload limit.
|
||||
# The maximum request body size the SvelteKit frontend server will accept (including file uploads via streaming).
|
||||
# Accepts a numeric value in bytes, or a unit suffix: K (kilobytes), M (megabytes), G (gigabytes).
|
||||
# Set to 'Infinity' to remove the limit entirely (recommended for archiving large PST/Mbox files).
|
||||
# Examples: 512K, 100M, 5G, Infinity. Defaults to 512K if not set.
|
||||
# For very large files (multi-GB), consider using the "Local Path" ingestion option which bypasses this limit entirely.
|
||||
BODY_SIZE_LIMIT=100M
|
||||
|
||||
# --- Local Storage Settings ---
|
||||
@@ -98,3 +104,24 @@ ENCRYPTION_KEY=
|
||||
# Apache Tika Integration
|
||||
# ONLY active if TIKA_URL is set
|
||||
TIKA_URL=http://tika:9998
|
||||
|
||||
|
||||
# Enterprise features (Skip this part if you are using the open-source version)
|
||||
|
||||
# Batch size for managing retention policy lifecycle. (This number of emails will be checked each time when retention policy scans the database. Adjust based on your system capability.)
|
||||
RETENTION_BATCH_SIZE=1000
|
||||
|
||||
# --- SMTP Journaling (Enterprise only) ---
|
||||
# The port the embedded SMTP journaling listener binds to inside the container.
|
||||
# This is the port your MTA (Exchange, MS365, Postfix, etc.) will send journal reports to.
|
||||
# The docker-compose.yml maps this same port on the host side by default.
|
||||
SMTP_JOURNALING_PORT=2525
|
||||
# The domain used to generate routing addresses for journaling sources.
|
||||
# Each source gets a unique address like journal-<id>@<domain>.
|
||||
# Set this to the domain/subdomain whose MX record points to this server.
|
||||
SMTP_JOURNALING_DOMAIN=journal.yourdomain.com
|
||||
# Maximum number of waiting jobs in the journal queue before the SMTP listener
|
||||
# returns 4xx temporary failures (backpressure). The MTA will retry automatically.
|
||||
JOURNAL_QUEUE_BACKPRESSURE_THRESHOLD=10000
|
||||
#BullMQ worker concurrency for processing journaled emails. Increase on servers with more CPU cores.
|
||||
JOURNAL_WORKER_CONCURRENCY=3
|
||||
18
README.md
18
README.md
@@ -11,7 +11,7 @@
|
||||
|
||||
Open Archiver provides a robust, self-hosted solution for archiving, storing, indexing, and searching emails from major platforms, including Google Workspace (Gmail), Microsoft 365, PST files, as well as generic IMAP-enabled email inboxes. Use Open Archiver to keep a permanent, tamper-proof record of your communication history, free from vendor lock-in.
|
||||
|
||||
## 📸 Screenshots
|
||||
## Screenshots
|
||||
|
||||

|
||||
_Dashboard_
|
||||
@@ -22,9 +22,9 @@ _Archived emails_
|
||||

|
||||
_Full-text search across all your emails and attachments_
|
||||
|
||||
## 👨👩👧👦 Join our community!
|
||||
## Join our community!
|
||||
|
||||
We are committed to build an engaging community around Open Archiver, and we are inviting all of you to join our community on Discord to get real-time support and connect with the team.
|
||||
We are committed to building an engaging community around Open Archiver, and we are inviting all of you to join our community on Discord to get real-time support and connect with the team.
|
||||
|
||||
[](https://discord.gg/MTtD7BhuTQ)
|
||||
|
||||
@@ -34,11 +34,11 @@ We are committed to build an engaging community around Open Archiver, and we are
|
||||
|
||||
Check out the live demo here: https://demo.openarchiver.com
|
||||
|
||||
Username: admin@local.com
|
||||
Username: demo@openarchiver.com
|
||||
|
||||
Password: openarchiver_demo
|
||||
|
||||
## ✨ Key Features
|
||||
## Key Features
|
||||
|
||||
- **Universal Ingestion**: Connect to any email provider to perform initial bulk imports and maintain continuous, real-time synchronization. Ingestion sources include:
|
||||
- IMAP connection
|
||||
@@ -57,7 +57,7 @@ Password: openarchiver_demo
|
||||
- - Each archived email comes with an "Integrity Report" feature that indicates if the files are original.
|
||||
- **Comprehensive Auditing**: An immutable audit trail logs all system activities, ensuring you have a clear record of who accessed what and when.
|
||||
|
||||
## 🛠️ Tech Stack
|
||||
## Tech Stack
|
||||
|
||||
Open Archiver is built on a modern, scalable, and maintainable technology stack:
|
||||
|
||||
@@ -68,7 +68,7 @@ Open Archiver is built on a modern, scalable, and maintainable technology stack:
|
||||
- **Database**: PostgreSQL for metadata, user management, and audit logs
|
||||
- **Deployment**: Docker Compose deployment
|
||||
|
||||
## 📦 Deployment
|
||||
## Deployment
|
||||
|
||||
### Prerequisites
|
||||
|
||||
@@ -104,7 +104,7 @@ Open Archiver is built on a modern, scalable, and maintainable technology stack:
|
||||
4. **Access the application:**
|
||||
Once the services are running, you can access the Open Archiver web interface by navigating to `http://localhost:3000` in your web browser.
|
||||
|
||||
## ⚙️ Data Source Configuration
|
||||
## Data Source Configuration
|
||||
|
||||
After deploying the application, you will need to configure one or more ingestion sources to begin archiving emails. Follow our detailed guides to connect to your email provider:
|
||||
|
||||
@@ -112,7 +112,7 @@ After deploying the application, you will need to configure one or more ingestio
|
||||
- [Connecting to Microsoft 365](https://docs.openarchiver.com/user-guides/email-providers/imap.html)
|
||||
- [Connecting to a Generic IMAP Server](https://docs.openarchiver.com/user-guides/email-providers/imap.html)
|
||||
|
||||
## 🤝 Contributing
|
||||
## Contributing
|
||||
|
||||
We welcome contributions from the community!
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ services:
|
||||
container_name: open-archiver
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- '3000:3000' # Frontend
|
||||
- '${PORT_FRONTEND:-3000}:3000' # Frontend
|
||||
env_file:
|
||||
- .env
|
||||
volumes:
|
||||
@@ -42,7 +42,7 @@ services:
|
||||
- open-archiver-net
|
||||
|
||||
meilisearch:
|
||||
image: getmeili/meilisearch:v1.15
|
||||
image: getmeili/meilisearch:v1.38
|
||||
container_name: meilisearch
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
import { defineConfig } from 'vitepress';
|
||||
import { useSidebar } from 'vitepress-openapi';
|
||||
import spec from '../api/openapi.json';
|
||||
|
||||
export default defineConfig({
|
||||
head: [
|
||||
@@ -6,7 +8,7 @@ export default defineConfig({
|
||||
'script',
|
||||
{
|
||||
defer: '',
|
||||
src: 'https://analytics.zenceipt.com/script.js',
|
||||
src: 'https://analytics.openarchiver.com/script.js',
|
||||
'data-website-id': '2c8b452e-eab5-4f82-8ead-902d8f8b976f',
|
||||
},
|
||||
],
|
||||
@@ -95,7 +97,12 @@ export default defineConfig({
|
||||
{ text: 'Integrity Check', link: '/api/integrity' },
|
||||
{ text: 'Search', link: '/api/search' },
|
||||
{ text: 'Storage', link: '/api/storage' },
|
||||
{ text: 'Upload', link: '/api/upload' },
|
||||
{ text: 'Jobs', link: '/api/jobs' },
|
||||
{ text: 'Users', link: '/api/users' },
|
||||
{ text: 'IAM', link: '/api/iam' },
|
||||
{ text: 'API Keys', link: '/api/api-keys' },
|
||||
{ text: 'Settings', link: '/api/settings' },
|
||||
],
|
||||
},
|
||||
{
|
||||
|
||||
19
docs/.vitepress/theme/index.ts
Normal file
19
docs/.vitepress/theme/index.ts
Normal file
@@ -0,0 +1,19 @@
|
||||
import DefaultTheme from 'vitepress/theme';
|
||||
import type { EnhanceAppContext } from 'vitepress';
|
||||
import { theme, useOpenapi } from 'vitepress-openapi/client';
|
||||
import 'vitepress-openapi/dist/style.css';
|
||||
import spec from '../../api/openapi.json';
|
||||
|
||||
export default {
|
||||
...DefaultTheme,
|
||||
enhanceApp({ app, router, siteData }: EnhanceAppContext) {
|
||||
// Delegate to DefaultTheme first
|
||||
DefaultTheme.enhanceApp?.({ app, router, siteData });
|
||||
|
||||
// Install vitepress-openapi theme: registers i18n plugin + all OA components
|
||||
theme.enhanceApp({ app, router, siteData });
|
||||
|
||||
// Initialize the global OpenAPI spec
|
||||
useOpenapi({ spec });
|
||||
},
|
||||
};
|
||||
19
docs/api/api-keys.md
Normal file
19
docs/api/api-keys.md
Normal file
@@ -0,0 +1,19 @@
|
||||
---
|
||||
aside: false
|
||||
---
|
||||
|
||||
# API Keys
|
||||
|
||||
Generate and manage API keys for programmatic access to the Open Archiver API. API keys are scoped to the user that created them and carry the same permissions as that user. The raw key value is only shown once at creation time.
|
||||
|
||||
## Generate an API Key
|
||||
|
||||
<OAOperation operationId="generateApiKey" />
|
||||
|
||||
## List API Keys
|
||||
|
||||
<OAOperation operationId="getApiKeys" />
|
||||
|
||||
## Delete an API Key
|
||||
|
||||
<OAOperation operationId="deleteApiKey" />
|
||||
@@ -1,107 +1,19 @@
|
||||
# Archived Email Service API
|
||||
---
|
||||
aside: false
|
||||
---
|
||||
|
||||
The Archived Email Service is responsible for retrieving archived emails and their details from the database and storage.
|
||||
# Archived Email API
|
||||
|
||||
## Endpoints
|
||||
Endpoints for retrieving and deleting archived emails. All endpoints require authentication and the appropriate `archive` permission.
|
||||
|
||||
All endpoints in this service require authentication.
|
||||
## List Emails for an Ingestion Source
|
||||
|
||||
### GET /api/v1/archived-emails/ingestion-source/:ingestionSourceId
|
||||
<OAOperation operationId="getArchivedEmails" />
|
||||
|
||||
Retrieves a paginated list of archived emails for a specific ingestion source.
|
||||
## Get a Single Email
|
||||
|
||||
**Access:** Authenticated
|
||||
<OAOperation operationId="getArchivedEmailById" />
|
||||
|
||||
#### URL Parameters
|
||||
## Delete an Email
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| :------------------ | :----- | :------------------------------------------------ |
|
||||
| `ingestionSourceId` | string | The ID of the ingestion source to get emails for. |
|
||||
|
||||
#### Query Parameters
|
||||
|
||||
| Parameter | Type | Description | Default |
|
||||
| :-------- | :----- | :------------------------------ | :------ |
|
||||
| `page` | number | The page number for pagination. | 1 |
|
||||
| `limit` | number | The number of items per page. | 10 |
|
||||
|
||||
#### Responses
|
||||
|
||||
- **200 OK:** A paginated list of archived emails.
|
||||
|
||||
```json
|
||||
{
|
||||
"items": [
|
||||
{
|
||||
"id": "email-id",
|
||||
"subject": "Test Email",
|
||||
"from": "sender@example.com",
|
||||
"sentAt": "2023-10-27T10:00:00.000Z",
|
||||
"hasAttachments": true,
|
||||
"recipients": [{ "name": "Recipient 1", "email": "recipient1@example.com" }]
|
||||
}
|
||||
],
|
||||
"total": 100,
|
||||
"page": 1,
|
||||
"limit": 10
|
||||
}
|
||||
```
|
||||
|
||||
- **500 Internal Server Error:** An unexpected error occurred.
|
||||
|
||||
### GET /api/v1/archived-emails/:id
|
||||
|
||||
Retrieves a single archived email by its ID, including its raw content and attachments.
|
||||
|
||||
**Access:** Authenticated
|
||||
|
||||
#### URL Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| :-------- | :----- | :---------------------------- |
|
||||
| `id` | string | The ID of the archived email. |
|
||||
|
||||
#### Responses
|
||||
|
||||
- **200 OK:** The archived email details.
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "email-id",
|
||||
"subject": "Test Email",
|
||||
"from": "sender@example.com",
|
||||
"sentAt": "2023-10-27T10:00:00.000Z",
|
||||
"hasAttachments": true,
|
||||
"recipients": [{ "name": "Recipient 1", "email": "recipient1@example.com" }],
|
||||
"raw": "...",
|
||||
"attachments": [
|
||||
{
|
||||
"id": "attachment-id",
|
||||
"filename": "document.pdf",
|
||||
"mimeType": "application/pdf",
|
||||
"sizeBytes": 12345
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
- **404 Not Found:** The archived email with the specified ID was not found.
|
||||
- **500 Internal Server Error:** An unexpected error occurred.
|
||||
|
||||
## Service Methods
|
||||
|
||||
### `getArchivedEmails(ingestionSourceId: string, page: number, limit: number): Promise<PaginatedArchivedEmails>`
|
||||
|
||||
Retrieves a paginated list of archived emails from the database for a given ingestion source.
|
||||
|
||||
- **ingestionSourceId:** The ID of the ingestion source.
|
||||
- **page:** The page number for pagination.
|
||||
- **limit:** The number of items per page.
|
||||
- **Returns:** A promise that resolves to a `PaginatedArchivedEmails` object.
|
||||
|
||||
### `getArchivedEmailById(emailId: string): Promise<ArchivedEmail | null>`
|
||||
|
||||
Retrieves a single archived email by its ID, including its raw content and attachments.
|
||||
|
||||
- **emailId:** The ID of the archived email.
|
||||
- **Returns:** A promise that resolves to an `ArchivedEmail` object or `null` if not found.
|
||||
<OAOperation operationId="deleteArchivedEmail" />
|
||||
|
||||
@@ -1,84 +1,19 @@
|
||||
# Auth Service API
|
||||
---
|
||||
aside: false
|
||||
---
|
||||
|
||||
The Auth Service is responsible for handling user authentication, including login and token verification.
|
||||
# Auth API
|
||||
|
||||
## Endpoints
|
||||
Handles user authentication including initial setup, login, and application setup status.
|
||||
|
||||
### POST /api/v1/auth/login
|
||||
## Setup
|
||||
|
||||
Authenticates a user and returns a JWT if the credentials are valid.
|
||||
<OAOperation operationId="authSetup" />
|
||||
|
||||
**Access:** Public
|
||||
## Login
|
||||
|
||||
**Rate Limiting:** This endpoint is rate-limited to prevent brute-force attacks.
|
||||
<OAOperation operationId="authLogin" />
|
||||
|
||||
#### Request Body
|
||||
## Check Setup Status
|
||||
|
||||
| Field | Type | Description |
|
||||
| :--------- | :----- | :------------------------ |
|
||||
| `email` | string | The user's email address. |
|
||||
| `password` | string | The user's password. |
|
||||
|
||||
#### Responses
|
||||
|
||||
- **200 OK:** Authentication successful.
|
||||
|
||||
```json
|
||||
{
|
||||
"accessToken": "your.jwt.token",
|
||||
"user": {
|
||||
"id": "user-id",
|
||||
"email": "user@example.com",
|
||||
"role": "user"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- **400 Bad Request:** Email or password not provided.
|
||||
|
||||
```json
|
||||
{
|
||||
"message": "Email and password are required"
|
||||
}
|
||||
```
|
||||
|
||||
- **401 Unauthorized:** Invalid credentials.
|
||||
|
||||
```json
|
||||
{
|
||||
"message": "Invalid credentials"
|
||||
}
|
||||
```
|
||||
|
||||
- **500 Internal Server Error:** An unexpected error occurred.
|
||||
|
||||
```json
|
||||
{
|
||||
"message": "An internal server error occurred"
|
||||
}
|
||||
```
|
||||
|
||||
## Service Methods
|
||||
|
||||
### `verifyPassword(password: string, hash: string): Promise<boolean>`
|
||||
|
||||
Compares a plain-text password with a hashed password to verify its correctness.
|
||||
|
||||
- **password:** The plain-text password.
|
||||
- **hash:** The hashed password to compare against.
|
||||
- **Returns:** A promise that resolves to `true` if the password is valid, otherwise `false`.
|
||||
|
||||
### `login(email: string, password: string): Promise<LoginResponse | null>`
|
||||
|
||||
Handles the user login process. It finds the user by email, verifies the password, and generates a JWT upon successful authentication.
|
||||
|
||||
- **email:** The user's email.
|
||||
- **password:** The user's password.
|
||||
- **Returns:** A promise that resolves to a `LoginResponse` object containing the `accessToken` and `user` details, or `null` if authentication fails.
|
||||
|
||||
### `verifyToken(token: string): Promise<AuthTokenPayload | null>`
|
||||
|
||||
Verifies the authenticity and expiration of a JWT.
|
||||
|
||||
- **token:** The JWT string to verify.
|
||||
- **Returns:** A promise that resolves to the token's `AuthTokenPayload` if valid, otherwise `null`.
|
||||
<OAOperation operationId="authStatus" />
|
||||
|
||||
@@ -1,19 +1,25 @@
|
||||
---
|
||||
aside: false
|
||||
---
|
||||
|
||||
# API Authentication
|
||||
|
||||
To access protected API endpoints, you need to include a user-generated API key in the `X-API-KEY` header of your requests.
|
||||
The API supports two authentication methods. Use whichever fits your use case.
|
||||
|
||||
## 1. Creating an API Key
|
||||
## Method 1: JWT (User Login)
|
||||
|
||||
You can create, manage, and view your API keys through the application's user interface.
|
||||
Obtain a short-lived JWT by calling `POST /v1/auth/login` with your email and password, then pass it as a Bearer token in the `Authorization` header.
|
||||
|
||||
1. Navigate to **Settings > API Keys** in the dashboard.
|
||||
2. Click the **"Generate API Key"** button.
|
||||
3. Provide a descriptive name for your key and select an expiration period.
|
||||
4. The new API key will be displayed. **Copy this key immediately and store it in a secure location. You will not be able to see it again.**
|
||||
**Example:**
|
||||
|
||||
## 2. Making Authenticated Requests
|
||||
```http
|
||||
GET /api/v1/dashboard/stats
|
||||
Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...
|
||||
```
|
||||
|
||||
Once you have your API key, you must include it in the `X-API-KEY` header of all subsequent requests to protected API endpoints.
|
||||
## Method 2: API Key
|
||||
|
||||
Long-lived API keys are suited for automated scripts and integrations. Create one in **Settings > API Keys**, then pass it in the `X-API-KEY` header.
|
||||
|
||||
**Example:**
|
||||
|
||||
@@ -22,4 +28,13 @@ GET /api/v1/dashboard/stats
|
||||
X-API-KEY: a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2
|
||||
```
|
||||
|
||||
If the API key is missing, expired, or invalid, the API will respond with a `401 Unauthorized` status code.
|
||||
### Creating an API Key
|
||||
|
||||
1. Navigate to **Settings > API Keys** in the dashboard.
|
||||
2. Click **"Generate API Key"**.
|
||||
3. Provide a descriptive name and select an expiration period (max 2 years).
|
||||
4. Copy the key immediately — it will not be shown again.
|
||||
|
||||
---
|
||||
|
||||
If the token or API key is missing, expired, or invalid, the API responds with `401 Unauthorized`.
|
||||
|
||||
@@ -1,114 +1,27 @@
|
||||
# Dashboard Service API
|
||||
---
|
||||
aside: false
|
||||
---
|
||||
|
||||
The Dashboard Service provides endpoints for retrieving statistics and data for the main dashboard.
|
||||
# Dashboard API
|
||||
|
||||
## Endpoints
|
||||
Aggregated statistics and summaries for the dashboard UI. Requires `read:dashboard` permission.
|
||||
|
||||
All endpoints in this service require authentication.
|
||||
## Get Stats
|
||||
|
||||
### GET /api/v1/dashboard/stats
|
||||
<OAOperation operationId="getDashboardStats" />
|
||||
|
||||
Retrieves overall statistics, including the total number of archived emails, total storage used, and the number of failed ingestions in the last 7 days.
|
||||
## Get Ingestion History
|
||||
|
||||
**Access:** Authenticated
|
||||
<OAOperation operationId="getIngestionHistory" />
|
||||
|
||||
#### Responses
|
||||
## Get Ingestion Source Summaries
|
||||
|
||||
- **200 OK:** An object containing the dashboard statistics.
|
||||
<OAOperation operationId="getDashboardIngestionSources" />
|
||||
|
||||
```json
|
||||
{
|
||||
"totalEmailsArchived": 12345,
|
||||
"totalStorageUsed": 54321098,
|
||||
"failedIngestionsLast7Days": 3
|
||||
}
|
||||
```
|
||||
## Get Recent Syncs
|
||||
|
||||
### GET /api/v1/dashboard/ingestion-history
|
||||
<OAOperation operationId="getRecentSyncs" />
|
||||
|
||||
Retrieves the email ingestion history for the last 30 days, grouped by day.
|
||||
## Get Indexed Email Insights
|
||||
|
||||
**Access:** Authenticated
|
||||
|
||||
#### Responses
|
||||
|
||||
- **200 OK:** An object containing the ingestion history.
|
||||
|
||||
```json
|
||||
{
|
||||
"history": [
|
||||
{
|
||||
"date": "2023-09-27T00:00:00.000Z",
|
||||
"count": 150
|
||||
},
|
||||
{
|
||||
"date": "2023-09-28T00:00:00.000Z",
|
||||
"count": 200
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### GET /api/v1/dashboard/ingestion-sources
|
||||
|
||||
Retrieves a list of all ingestion sources along with their status and storage usage.
|
||||
|
||||
**Access:** Authenticated
|
||||
|
||||
#### Responses
|
||||
|
||||
- **200 OK:** An array of ingestion source objects.
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"id": "source-id-1",
|
||||
"name": "Google Workspace",
|
||||
"provider": "google",
|
||||
"status": "active",
|
||||
"storageUsed": 12345678
|
||||
},
|
||||
{
|
||||
"id": "source-id-2",
|
||||
"name": "Microsoft 365",
|
||||
"provider": "microsoft",
|
||||
"status": "error",
|
||||
"storageUsed": 87654321
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
### GET /api/v1/dashboard/recent-syncs
|
||||
|
||||
Retrieves a list of recent synchronization jobs. (Note: This is currently a placeholder and will return an empty array).
|
||||
|
||||
**Access:** Authenticated
|
||||
|
||||
#### Responses
|
||||
|
||||
- **200 OK:** An empty array.
|
||||
|
||||
```json
|
||||
[]
|
||||
```
|
||||
|
||||
### GET /api/v1/dashboard/indexed-insights
|
||||
|
||||
Retrieves insights from the indexed email data, such as the top senders.
|
||||
|
||||
**Access:** Authenticated
|
||||
|
||||
#### Responses
|
||||
|
||||
- **200 OK:** An object containing indexed insights.
|
||||
|
||||
```json
|
||||
{
|
||||
"topSenders": [
|
||||
{
|
||||
"sender": "user@example.com",
|
||||
"count": 42
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
<OAOperation operationId="getIndexedInsights" />
|
||||
|
||||
27
docs/api/iam.md
Normal file
27
docs/api/iam.md
Normal file
@@ -0,0 +1,27 @@
|
||||
---
|
||||
aside: false
|
||||
---
|
||||
|
||||
# IAM API
|
||||
|
||||
Manage Identity and Access Management roles and their CASL policy statements. Role management requires Super Admin (`manage:all`) permission. Reading roles requires `read:roles` permission.
|
||||
|
||||
## List All Roles
|
||||
|
||||
<OAOperation operationId="getRoles" />
|
||||
|
||||
## Create a Role
|
||||
|
||||
<OAOperation operationId="createRole" />
|
||||
|
||||
## Get a Role
|
||||
|
||||
<OAOperation operationId="getRoleById" />
|
||||
|
||||
## Update a Role
|
||||
|
||||
<OAOperation operationId="updateRole" />
|
||||
|
||||
## Delete a Role
|
||||
|
||||
<OAOperation operationId="deleteRole" />
|
||||
@@ -1,3 +1,7 @@
|
||||
---
|
||||
aside: false
|
||||
---
|
||||
|
||||
# API Overview
|
||||
|
||||
Welcome to the Open Archiver API documentation. This section provides detailed information about the available API endpoints.
|
||||
|
||||
@@ -1,196 +1,43 @@
|
||||
# Ingestion Service API
|
||||
---
|
||||
aside: false
|
||||
---
|
||||
|
||||
The Ingestion Service manages ingestion sources, which are configurations for connecting to email providers and importing emails.
|
||||
# Ingestion API
|
||||
|
||||
## Endpoints
|
||||
Manage ingestion sources — the configured connections to email providers (Google Workspace, Microsoft 365, IMAP, and file imports). Credentials are never returned in responses.
|
||||
|
||||
All endpoints in this service require authentication.
|
||||
## Create an Ingestion Source
|
||||
|
||||
### POST /api/v1/ingestion-sources
|
||||
<OAOperation operationId="createIngestionSource" />
|
||||
|
||||
Creates a new ingestion source.
|
||||
## List Ingestion Sources
|
||||
|
||||
**Access:** Authenticated
|
||||
<OAOperation operationId="listIngestionSources" />
|
||||
|
||||
#### Request Body
|
||||
## Get an Ingestion Source
|
||||
|
||||
The request body should be a `CreateIngestionSourceDto` object.
|
||||
<OAOperation operationId="getIngestionSourceById" />
|
||||
|
||||
```typescript
|
||||
interface CreateIngestionSourceDto {
|
||||
name: string;
|
||||
provider: 'google_workspace' | 'microsoft_365' | 'generic_imap' | 'pst_import' | 'eml_import' | 'mbox_import';
|
||||
providerConfig: IngestionCredentials;
|
||||
}
|
||||
```
|
||||
## Update an Ingestion Source
|
||||
|
||||
#### Example: Creating an Mbox Import Source with File Upload
|
||||
<OAOperation operationId="updateIngestionSource" />
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "My Mbox Import",
|
||||
"provider": "mbox_import",
|
||||
"providerConfig": {
|
||||
"type": "mbox_import",
|
||||
"uploadedFileName": "emails.mbox",
|
||||
"uploadedFilePath": "open-archiver/tmp/uuid-emails.mbox"
|
||||
}
|
||||
}
|
||||
```
|
||||
## Delete an Ingestion Source
|
||||
|
||||
#### Example: Creating an Mbox Import Source with Local File Path
|
||||
<OAOperation operationId="deleteIngestionSource" />
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "My Mbox Import",
|
||||
"provider": "mbox_import",
|
||||
"providerConfig": {
|
||||
"type": "mbox_import",
|
||||
"localFilePath": "/path/to/emails.mbox"
|
||||
}
|
||||
}
|
||||
```
|
||||
## Trigger Initial Import
|
||||
|
||||
**Note:** When using `localFilePath`, the file will not be deleted after import. When using `uploadedFilePath` (via the upload API), the file will be automatically deleted after import. The same applies to `pst_import` and `eml_import` providers.
|
||||
<OAOperation operationId="triggerInitialImport" />
|
||||
|
||||
**Important regarding `localFilePath`:** When running OpenArchiver in a Docker container (which is the standard deployment), `localFilePath` refers to the path **inside the Docker container**, not on the host machine.
|
||||
To use a local file:
|
||||
1. **Recommended:** Place your file inside the directory defined by `STORAGE_LOCAL_ROOT_PATH` (e.g., inside a `temp` folder). Since this directory is already mounted as a volume, the file will be accessible at the same path inside the container.
|
||||
2. **Alternative:** Mount a specific directory containing your files as a volume in `docker-compose.yml`. For example, add `- /path/to/my/files:/imports` to the `volumes` section and use `/imports/myfile.pst` as the `localFilePath`.
|
||||
## Pause an Ingestion Source
|
||||
|
||||
#### Responses
|
||||
<OAOperation operationId="pauseIngestionSource" />
|
||||
|
||||
- **201 Created:** The newly created ingestion source.
|
||||
- **500 Internal Server Error:** An unexpected error occurred.
|
||||
## Force Sync
|
||||
|
||||
### GET /api/v1/ingestion-sources
|
||||
<OAOperation operationId="triggerForceSync" />
|
||||
|
||||
Retrieves all ingestion sources.
|
||||
## Unmerge an Ingestion Source
|
||||
|
||||
**Access:** Authenticated
|
||||
|
||||
#### Responses
|
||||
|
||||
- **200 OK:** An array of ingestion source objects.
|
||||
- **500 Internal Server Error:** An unexpected error occurred.
|
||||
|
||||
### GET /api/v1/ingestion-sources/:id
|
||||
|
||||
Retrieves a single ingestion source by its ID.
|
||||
|
||||
**Access:** Authenticated
|
||||
|
||||
#### URL Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| :-------- | :----- | :------------------------------ |
|
||||
| `id` | string | The ID of the ingestion source. |
|
||||
|
||||
#### Responses
|
||||
|
||||
- **200 OK:** The ingestion source object.
|
||||
- **404 Not Found:** Ingestion source not found.
|
||||
- **500 Internal Server Error:** An unexpected error occurred.
|
||||
|
||||
### PUT /api/v1/ingestion-sources/:id
|
||||
|
||||
Updates an existing ingestion source.
|
||||
|
||||
**Access:** Authenticated
|
||||
|
||||
#### URL Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| :-------- | :----- | :------------------------------ |
|
||||
| `id` | string | The ID of the ingestion source. |
|
||||
|
||||
#### Request Body
|
||||
|
||||
The request body should be an `UpdateIngestionSourceDto` object.
|
||||
|
||||
```typescript
|
||||
interface UpdateIngestionSourceDto {
|
||||
name?: string;
|
||||
provider?: 'google' | 'microsoft' | 'generic_imap';
|
||||
providerConfig?: IngestionCredentials;
|
||||
status?: 'pending_auth' | 'auth_success' | 'importing' | 'active' | 'paused' | 'error';
|
||||
}
|
||||
```
|
||||
|
||||
#### Responses
|
||||
|
||||
- **200 OK:** The updated ingestion source object.
|
||||
- **404 Not Found:** Ingestion source not found.
|
||||
- **500 Internal Server Error:** An unexpected error occurred.
|
||||
|
||||
### DELETE /api/v1/ingestion-sources/:id
|
||||
|
||||
Deletes an ingestion source and all associated data.
|
||||
|
||||
**Access:** Authenticated
|
||||
|
||||
#### URL Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| :-------- | :----- | :------------------------------ |
|
||||
| `id` | string | The ID of the ingestion source. |
|
||||
|
||||
#### Responses
|
||||
|
||||
- **204 No Content:** The ingestion source was deleted successfully.
|
||||
- **404 Not Found:** Ingestion source not found.
|
||||
- **500 Internal Server Error:** An unexpected error occurred.
|
||||
|
||||
### POST /api/v1/ingestion-sources/:id/import
|
||||
|
||||
Triggers the initial import process for an ingestion source.
|
||||
|
||||
**Access:** Authenticated
|
||||
|
||||
#### URL Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| :-------- | :----- | :------------------------------ |
|
||||
| `id` | string | The ID of the ingestion source. |
|
||||
|
||||
#### Responses
|
||||
|
||||
- **202 Accepted:** The initial import was triggered successfully.
|
||||
- **404 Not Found:** Ingestion source not found.
|
||||
- **500 Internal Server Error:** An unexpected error occurred.
|
||||
|
||||
### POST /api/v1/ingestion-sources/:id/pause
|
||||
|
||||
Pauses an active ingestion source.
|
||||
|
||||
**Access:** Authenticated
|
||||
|
||||
#### URL Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| :-------- | :----- | :------------------------------ |
|
||||
| `id` | string | The ID of the ingestion source. |
|
||||
|
||||
#### Responses
|
||||
|
||||
- **200 OK:** The updated ingestion source object with a `paused` status.
|
||||
- **404 Not Found:** Ingestion source not found.
|
||||
- **500 Internal Server Error:** An unexpected error occurred.
|
||||
|
||||
### POST /api/v1/ingestion-sources/:id/sync
|
||||
|
||||
Triggers a forced synchronization for an ingestion source.
|
||||
|
||||
**Access:** Authenticated
|
||||
|
||||
#### URL Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| :-------- | :----- | :------------------------------ |
|
||||
| `id` | string | The ID of the ingestion source. |
|
||||
|
||||
#### Responses
|
||||
|
||||
- **202 Accepted:** The force sync was triggered successfully.
|
||||
- **404 Not Found:** Ingestion source not found.
|
||||
- **500 Internal Server Error:** An unexpected error occurred.
|
||||
<OAOperation operationId="unmergeIngestionSource" />
|
||||
|
||||
@@ -1,51 +1,11 @@
|
||||
---
|
||||
aside: false
|
||||
---
|
||||
|
||||
# Integrity Check API
|
||||
|
||||
The Integrity Check API provides an endpoint to verify the cryptographic hash of an archived email and its attachments against the stored values in the database. This allows you to ensure that the stored files have not been tampered with or corrupted since they were archived.
|
||||
Verify the SHA-256 hash of an archived email and all its attachments against the hashes stored at archival time.
|
||||
|
||||
## Check Email Integrity
|
||||
|
||||
Verifies the integrity of a specific archived email and all of its associated attachments.
|
||||
|
||||
- **URL:** `/api/v1/integrity/:id`
|
||||
- **Method:** `GET`
|
||||
- **URL Params:**
|
||||
- `id=[string]` (required) - The UUID of the archived email to check.
|
||||
- **Permissions:** `read:archive`
|
||||
- **Success Response:**
|
||||
- **Code:** 200 OK
|
||||
- **Content:** `IntegrityCheckResult[]`
|
||||
|
||||
### Response Body `IntegrityCheckResult`
|
||||
|
||||
An array of objects, each representing the result of an integrity check for a single file (either the email itself or an attachment).
|
||||
|
||||
| Field | Type | Description |
|
||||
| :--------- | :------------------------ | :-------------------------------------------------------------------------- |
|
||||
| `type` | `'email' \| 'attachment'` | The type of the file being checked. |
|
||||
| `id` | `string` | The UUID of the email or attachment. |
|
||||
| `filename` | `string` (optional) | The filename of the attachment. This field is only present for attachments. |
|
||||
| `isValid` | `boolean` | `true` if the current hash matches the stored hash, otherwise `false`. |
|
||||
| `reason` | `string` (optional) | A reason for the failure. Only present if `isValid` is `false`. |
|
||||
|
||||
### Example Response
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"type": "email",
|
||||
"id": "a1b2c3d4-e5f6-7890-1234-567890abcdef",
|
||||
"isValid": true
|
||||
},
|
||||
{
|
||||
"type": "attachment",
|
||||
"id": "b2c3d4e5-f6a7-8901-2345-67890abcdef1",
|
||||
"filename": "document.pdf",
|
||||
"isValid": false,
|
||||
"reason": "Stored hash does not match current hash."
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
- **Error Response:**
|
||||
- **Code:** 404 Not Found
|
||||
- **Content:** `{ "message": "Archived email not found" }`
|
||||
<OAOperation operationId="checkIntegrity" />
|
||||
|
||||
132
docs/api/jobs.md
132
docs/api/jobs.md
@@ -1,128 +1,20 @@
|
||||
---
|
||||
aside: false
|
||||
---
|
||||
|
||||
# Jobs API
|
||||
|
||||
The Jobs API provides endpoints for monitoring the job queues and the jobs within them.
|
||||
Monitor BullMQ job queues for asynchronous tasks such as email ingestion, indexing, and sync scheduling. Requires Super Admin (`manage:all`) permission.
|
||||
|
||||
## Overview
|
||||
There are two queues:
|
||||
|
||||
Open Archiver uses a job queue system to handle asynchronous tasks like email ingestion and indexing. The system is built on Redis and BullMQ and uses a producer-consumer pattern.
|
||||
- **`ingestion`** — handles all email ingestion and sync jobs (`initial-import`, `continuous-sync`, `process-mailbox`, `sync-cycle-finished`, `schedule-continuous-sync`)
|
||||
- **`indexing`** — handles batched Meilisearch document indexing (`index-email-batch`)
|
||||
|
||||
### Job Statuses
|
||||
## List All Queues
|
||||
|
||||
Jobs can have one of the following statuses:
|
||||
<OAOperation operationId="getQueues" />
|
||||
|
||||
- **active:** The job is currently being processed.
|
||||
- **completed:** The job has been completed successfully.
|
||||
- **failed:** The job has failed after all retry attempts.
|
||||
- **delayed:** The job is delayed and will be processed at a later time.
|
||||
- **waiting:** The job is waiting to be processed.
|
||||
- **paused:** The job is paused and will not be processed until it is resumed.
|
||||
## Get Jobs in a Queue
|
||||
|
||||
### Errors
|
||||
|
||||
When a job fails, the `failedReason` and `stacktrace` fields will contain information about the error. The `error` field will also be populated with the `failedReason` for easier access.
|
||||
|
||||
### Job Preservation
|
||||
|
||||
Jobs are preserved only for a limited time after they complete or fail, so the job counts and job listings returned by the API reflect only recently finished jobs.
|
||||
|
||||
- **Completed jobs:** The last 1000 completed jobs are preserved.
|
||||
- **Failed jobs:** The last 5000 failed jobs are preserved.
|
||||
|
||||
## Get All Queues
|
||||
|
||||
- **Endpoint:** `GET /v1/jobs/queues`
|
||||
- **Description:** Retrieves a list of all job queues and their job counts.
|
||||
- **Permissions:** `manage:all`
|
||||
- **Responses:**
|
||||
- `200 OK`: Returns a list of queue overviews.
|
||||
- `401 Unauthorized`: If the user is not authenticated.
|
||||
- `403 Forbidden`: If the user does not have the required permissions.
|
||||
|
||||
### Response Body
|
||||
|
||||
```json
|
||||
{
|
||||
"queues": [
|
||||
{
|
||||
"name": "ingestion",
|
||||
"counts": {
|
||||
"active": 0,
|
||||
"completed": 56,
|
||||
"failed": 4,
|
||||
"delayed": 3,
|
||||
"waiting": 0,
|
||||
"paused": 0
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "indexing",
|
||||
"counts": {
|
||||
"active": 0,
|
||||
"completed": 0,
|
||||
"failed": 0,
|
||||
"delayed": 0,
|
||||
"waiting": 0,
|
||||
"paused": 0
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Get Queue Jobs
|
||||
|
||||
- **Endpoint:** `GET /v1/jobs/queues/:queueName`
|
||||
- **Description:** Retrieves a list of jobs within a specific queue, with pagination and filtering by status.
|
||||
- **Permissions:** `manage:all`
|
||||
- **URL Parameters:**
|
||||
- `queueName` (string, required): The name of the queue to retrieve jobs from.
|
||||
- **Query Parameters:**
|
||||
- `status` (string, optional): The status of the jobs to retrieve. Can be one of `active`, `completed`, `failed`, `delayed`, `waiting`, `paused`. Defaults to `failed`.
|
||||
- `page` (number, optional): The page number to retrieve. Defaults to `1`.
|
||||
- `limit` (number, optional): The number of jobs to retrieve per page. Defaults to `10`.
|
||||
- **Responses:**
|
||||
- `200 OK`: Returns a detailed view of the queue, including a paginated list of jobs.
|
||||
- `401 Unauthorized`: If the user is not authenticated.
|
||||
- `403 Forbidden`: If the user does not have the required permissions.
|
||||
- `404 Not Found`: If the specified queue does not exist.
|
||||
|
||||
### Response Body
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "ingestion",
|
||||
"counts": {
|
||||
"active": 0,
|
||||
"completed": 56,
|
||||
"failed": 4,
|
||||
"delayed": 3,
|
||||
"waiting": 0,
|
||||
"paused": 0
|
||||
},
|
||||
"jobs": [
|
||||
{
|
||||
"id": "1",
|
||||
"name": "initial-import",
|
||||
"data": {
|
||||
"ingestionSourceId": "clx1y2z3a0000b4d2e5f6g7h8"
|
||||
},
|
||||
"state": "failed",
|
||||
"failedReason": "Error: Connection timed out",
|
||||
"timestamp": 1678886400000,
|
||||
"processedOn": 1678886401000,
|
||||
"finishedOn": 1678886402000,
|
||||
"attemptsMade": 5,
|
||||
"stacktrace": ["..."],
|
||||
"returnValue": null,
|
||||
"ingestionSourceId": "clx1y2z3a0000b4d2e5f6g7h8",
|
||||
"error": "Error: Connection timed out"
|
||||
}
|
||||
],
|
||||
"pagination": {
|
||||
"currentPage": 1,
|
||||
"totalPages": 1,
|
||||
"totalJobs": 4,
|
||||
"limit": 10
|
||||
}
|
||||
}
|
||||
```
|
||||
<OAOperation operationId="getQueueJobs" />
|
||||
|
||||
3301
docs/api/openapi.json
Normal file
3301
docs/api/openapi.json
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,3 +1,7 @@
|
||||
---
|
||||
aside: false
|
||||
---
|
||||
|
||||
# Rate Limiting
|
||||
|
||||
The API implements rate limiting as a security measure to protect your instance from denial-of-service (DoS) and brute-force attacks. This is a crucial feature for maintaining the security and stability of the application.
|
||||
|
||||
@@ -1,50 +1,11 @@
|
||||
# Search Service API
|
||||
---
|
||||
aside: false
|
||||
---
|
||||
|
||||
The Search Service provides an endpoint for searching indexed emails.
|
||||
# Search API
|
||||
|
||||
## Endpoints
|
||||
Full-text search over indexed archived emails, powered by Meilisearch.
|
||||
|
||||
All endpoints in this service require authentication.
|
||||
## Search Emails
|
||||
|
||||
### GET /api/v1/search
|
||||
|
||||
Performs a search query against the indexed emails.
|
||||
|
||||
**Access:** Authenticated
|
||||
|
||||
#### Query Parameters
|
||||
|
||||
| Parameter | Type | Description | Default |
|
||||
| :----------------- | :----- | :--------------------------------------------------------------------- | :------ |
|
||||
| `keywords` | string | The search query. | |
|
||||
| `page` | number | The page number for pagination. | 1 |
|
||||
| `limit` | number | The number of items per page. | 10 |
|
||||
| `matchingStrategy` | string | The matching strategy to use (`all` or `last`). | `last` |
|
||||
| `filters` | object | Key-value pairs for filtering results (e.g., `from=user@example.com`). | |
|
||||
|
||||
#### Responses
|
||||
|
||||
- **200 OK:** A search result object.
|
||||
|
||||
```json
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
"id": "email-id",
|
||||
"subject": "Test Email",
|
||||
"from": "sender@example.com",
|
||||
"_formatted": {
|
||||
"subject": "<em>Test</em> Email"
|
||||
}
|
||||
}
|
||||
],
|
||||
"total": 1,
|
||||
"page": 1,
|
||||
"limit": 10,
|
||||
"totalPages": 1,
|
||||
"processingTimeMs": 5
|
||||
}
|
||||
```
|
||||
|
||||
- **400 Bad Request:** Keywords are required.
|
||||
- **500 Internal Server Error:** An unexpected error occurred.
|
||||
<OAOperation operationId="searchEmails" />
|
||||
|
||||
15
docs/api/settings.md
Normal file
15
docs/api/settings.md
Normal file
@@ -0,0 +1,15 @@
|
||||
---
|
||||
aside: false
|
||||
---
|
||||
|
||||
# Settings API
|
||||
|
||||
Read and update system-wide configuration. The `GET` endpoint is public. The `PUT` endpoint requires `manage:settings` permission.
|
||||
|
||||
## Get System Settings
|
||||
|
||||
<OAOperation operationId="getSystemSettings" />
|
||||
|
||||
## Update System Settings
|
||||
|
||||
<OAOperation operationId="updateSystemSettings" />
|
||||
@@ -1,26 +1,11 @@
|
||||
# Storage Service API
|
||||
---
|
||||
aside: false
|
||||
---
|
||||
|
||||
The Storage Service provides an endpoint for downloading files from the configured storage provider.
|
||||
# Storage API
|
||||
|
||||
## Endpoints
|
||||
Download files from the configured storage backend (local filesystem or S3-compatible). Requires `read:archive` permission.
|
||||
|
||||
All endpoints in this service require authentication.
|
||||
## Download a File
|
||||
|
||||
### GET /api/v1/storage/download
|
||||
|
||||
Downloads a file from the storage.
|
||||
|
||||
**Access:** Authenticated
|
||||
|
||||
#### Query Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| :-------- | :----- | :------------------------------------------------ |
|
||||
| `path` | string | The path to the file within the storage provider. |
|
||||
|
||||
#### Responses
|
||||
|
||||
- **200 OK:** The file stream.
|
||||
- **400 Bad Request:** File path is required or invalid.
|
||||
- **404 Not Found:** File not found.
|
||||
- **500 Internal Server Error:** An unexpected error occurred.
|
||||
<OAOperation operationId="downloadFile" />
|
||||
|
||||
11
docs/api/upload.md
Normal file
11
docs/api/upload.md
Normal file
@@ -0,0 +1,11 @@
|
||||
---
|
||||
aside: false
|
||||
---
|
||||
|
||||
# Upload API
|
||||
|
||||
Upload files (PST, EML, MBOX) to temporary storage before creating a file-based ingestion source. The returned `filePath` should be passed as `uploadedFilePath` in the ingestion source `providerConfig`.
|
||||
|
||||
## Upload a File
|
||||
|
||||
<OAOperation operationId="uploadFile" />
|
||||
39
docs/api/users.md
Normal file
39
docs/api/users.md
Normal file
@@ -0,0 +1,39 @@
|
||||
---
|
||||
aside: false
|
||||
---
|
||||
|
||||
# Users API
|
||||
|
||||
Manage user accounts. Creating, updating, and deleting users requires Super Admin (`manage:all`) permission.
|
||||
|
||||
## List All Users
|
||||
|
||||
<OAOperation operationId="getUsers" />
|
||||
|
||||
## Create a User
|
||||
|
||||
<OAOperation operationId="createUser" />
|
||||
|
||||
## Get a User
|
||||
|
||||
<OAOperation operationId="getUser" />
|
||||
|
||||
## Update a User
|
||||
|
||||
<OAOperation operationId="updateUser" />
|
||||
|
||||
## Delete a User
|
||||
|
||||
<OAOperation operationId="deleteUser" />
|
||||
|
||||
## Get Current User Profile
|
||||
|
||||
<OAOperation operationId="getProfile" />
|
||||
|
||||
## Update Current User Profile
|
||||
|
||||
<OAOperation operationId="updateProfile" />
|
||||
|
||||
## Update Password
|
||||
|
||||
<OAOperation operationId="updatePassword" />
|
||||
454
docs/enterprise/legal-holds/api.md
Normal file
454
docs/enterprise/legal-holds/api.md
Normal file
@@ -0,0 +1,454 @@
|
||||
# Legal Holds: API Endpoints
|
||||
|
||||
The legal holds feature exposes a RESTful API for managing holds and linking them to archived emails. All endpoints require authentication and appropriate permissions as specified below.
|
||||
|
||||
**Base URL:** `/api/v1/enterprise/legal-holds`
|
||||
|
||||
All endpoints also require the `LEGAL_HOLDS` feature to be enabled in the enterprise license.
|
||||
|
||||
---
|
||||
|
||||
## Hold Management Endpoints
|
||||
|
||||
### List All Holds
|
||||
|
||||
Retrieves all legal holds ordered by creation date ascending, each annotated with the count of currently linked emails.
|
||||
|
||||
- **Endpoint:** `GET /holds`
|
||||
- **Method:** `GET`
|
||||
- **Authentication:** Required
|
||||
- **Permission:** `manage:all`
|
||||
|
||||
#### Response Body
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"id": "a1b2c3d4-e5f6-7890-abcd-ef1234567890",
|
||||
"name": "Project Titan Litigation — 2026",
|
||||
"reason": "Preservation order received 2026-01-15 re: IP dispute",
|
||||
"isActive": true,
|
||||
"caseId": null,
|
||||
"emailCount": 4821,
|
||||
"createdAt": "2026-01-15T10:30:00.000Z",
|
||||
"updatedAt": "2026-01-15T10:30:00.000Z"
|
||||
},
|
||||
{
|
||||
"id": "b2c3d4e5-f6a7-8901-bcde-f23456789012",
|
||||
"name": "SEC Investigation Q3 2025",
|
||||
"reason": null,
|
||||
"isActive": false,
|
||||
"caseId": "c3d4e5f6-a7b8-9012-cdef-345678901234",
|
||||
"emailCount": 310,
|
||||
"createdAt": "2025-09-01T08:00:00.000Z",
|
||||
"updatedAt": "2025-11-20T16:45:00.000Z"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Get Hold by ID
|
||||
|
||||
Retrieves a single legal hold by its UUID.
|
||||
|
||||
- **Endpoint:** `GET /holds/:id`
|
||||
- **Method:** `GET`
|
||||
- **Authentication:** Required
|
||||
- **Permission:** `manage:all`
|
||||
|
||||
#### Path Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ------ | ---------------------------- |
|
||||
| `id` | `uuid` | The UUID of the hold to get. |
|
||||
|
||||
#### Response
|
||||
|
||||
Returns a single hold object (same shape as the list endpoint), or `404` if not found.
|
||||
|
||||
---
|
||||
|
||||
### Create Hold
|
||||
|
||||
Creates a new legal hold. Holds are always created in the **active** state.
|
||||
|
||||
- **Endpoint:** `POST /holds`
|
||||
- **Method:** `POST`
|
||||
- **Authentication:** Required
|
||||
- **Permission:** `manage:all`
|
||||
|
||||
#### Request Body
|
||||
|
||||
| Field | Type | Required | Description |
|
||||
| -------- | -------- | -------- | -------------------------------------------------------------- |
|
||||
| `name` | `string` | Yes | Unique hold name. Max 255 characters. |
|
||||
| `reason` | `string` | No       | Legal basis or description for the hold. Max 2,000 characters. |
|
||||
| `caseId` | `uuid` | No | Optional UUID of an `ediscovery_cases` record to link to. |
|
||||
|
||||
#### Example Request
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "Project Titan Litigation — 2026",
|
||||
"reason": "Preservation notice received from outside counsel on 2026-01-15 regarding IP dispute with ExCorp.",
|
||||
"caseId": null
|
||||
}
|
||||
```
|
||||
|
||||
#### Response
|
||||
|
||||
- **`201 Created`** — Returns the created hold object with `emailCount: 0`.
|
||||
- **`409 Conflict`** — A hold with this name already exists.
|
||||
- **`422 Unprocessable Entity`** — Validation errors.
|
||||
|
||||
---
|
||||
|
||||
### Update Hold
|
||||
|
||||
Updates the name, reason, or `isActive` state of a hold. Only the fields provided in the request body are modified.
|
||||
|
||||
- **Endpoint:** `PUT /holds/:id`
|
||||
- **Method:** `PUT`
|
||||
- **Authentication:** Required
|
||||
- **Permission:** `manage:all`
|
||||
|
||||
#### Path Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ------ | ------------------------------- |
|
||||
| `id` | `uuid` | The UUID of the hold to update. |
|
||||
|
||||
#### Request Body
|
||||
|
||||
All fields are optional. At least one must be provided.
|
||||
|
||||
| Field | Type | Description |
|
||||
| ---------- | --------- | --------------------------------------------------- |
|
||||
| `name` | `string` | New hold name. Max 255 characters. |
|
||||
| `reason`   | `string`  | Updated reason/description. Max 2,000 characters.   |
|
||||
| `isActive` | `boolean` | Set to `false` to deactivate, `true` to reactivate. |
|
||||
|
||||
#### Example — Deactivate a Hold
|
||||
|
||||
```json
|
||||
{
|
||||
"isActive": false
|
||||
}
|
||||
```
|
||||
|
||||
#### Response
|
||||
|
||||
- **`200 OK`** — Returns the updated hold object.
|
||||
- **`404 Not Found`** — Hold with the given ID does not exist.
|
||||
- **`422 Unprocessable Entity`** — Validation errors.
|
||||
|
||||
> **Important:** Setting `isActive` to `false` immediately lifts deletion immunity from all emails solely protected by this hold. The next lifecycle worker cycle will evaluate those emails against retention labels and policies.
|
||||
|
||||
---
|
||||
|
||||
### Delete Hold
|
||||
|
||||
Permanently deletes a legal hold and (via database CASCADE) all associated `email_legal_holds` rows.
|
||||
|
||||
- **Endpoint:** `DELETE /holds/:id`
|
||||
- **Method:** `DELETE`
|
||||
- **Authentication:** Required
|
||||
- **Permission:** `manage:all`
|
||||
|
||||
#### Path Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ------ | ------------------------------- |
|
||||
| `id` | `uuid` | The UUID of the hold to delete. |
|
||||
|
||||
#### Response
|
||||
|
||||
- **`204 No Content`** — Hold successfully deleted.
|
||||
- **`404 Not Found`** — Hold with the given ID does not exist.
|
||||
- **`409 Conflict`** — The hold is currently active. Deactivate it first by calling `PUT /holds/:id` with `{ "isActive": false }`.
|
||||
|
||||
> **Security note:** Active holds cannot be deleted. This requirement forces an explicit, auditable deactivation step before the hold record is removed.
|
||||
|
||||
---
|
||||
|
||||
## Bulk Operations
|
||||
|
||||
### Bulk Apply Hold via Search Query
|
||||
|
||||
Applies a legal hold to **all emails matching a Meilisearch query**. The operation is safe for large result sets: the UI fires a single request and the server processes matches in pages of 1,000, so even very large result sets do not time out.
|
||||
|
||||
- **Endpoint:** `POST /holds/:id/bulk-apply`
|
||||
- **Method:** `POST`
|
||||
- **Authentication:** Required
|
||||
- **Permission:** `manage:all`
|
||||
|
||||
#### Path Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ------ | ------------------------------ |
|
||||
| `id` | `uuid` | The UUID of the hold to apply. |
|
||||
|
||||
#### Request Body
|
||||
|
||||
| Field | Type | Required | Description |
|
||||
| ------------- | -------- | -------- | ------------------------------------------------- |
|
||||
| `searchQuery` | `object` | Yes | A Meilisearch query object (see structure below). |
|
||||
|
||||
##### `searchQuery` Object
|
||||
|
||||
| Field | Type | Required | Description |
|
||||
| ------------------ | -------- | -------- | ------------------------------------------------------------------- |
|
||||
| `query` | `string` | Yes | Full-text search string. Pass `""` to match all documents. |
|
||||
| `filters` | `object` | No | Key-value filter object (e.g., `{ "from": "user@corp.com" }`). |
|
||||
| `matchingStrategy` | `string` | No | Meilisearch matching strategy: `"last"`, `"all"`, or `"frequency"`. |
|
||||
|
||||
#### Example Request
|
||||
|
||||
```json
|
||||
{
|
||||
"searchQuery": {
|
||||
"query": "Project Titan confidential",
|
||||
"filters": {
|
||||
"from": "john.doe@acme.com",
|
||||
"startDate": "2023-01-01",
|
||||
"endDate": "2025-12-31"
|
||||
},
|
||||
"matchingStrategy": "all"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Response Body
|
||||
|
||||
```json
|
||||
{
|
||||
"legalHoldId": "a1b2c3d4-e5f6-7890-abcd-ef1234567890",
|
||||
"emailsLinked": 1247,
|
||||
"queryUsed": {
|
||||
"query": "Project Titan confidential",
|
||||
"filters": {
|
||||
"from": "john.doe@acme.com",
|
||||
"startDate": "2023-01-01",
|
||||
"endDate": "2025-12-31"
|
||||
},
|
||||
"matchingStrategy": "all"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- `emailsLinked` — The number of emails **newly** linked to the hold by this operation. Emails already linked to this hold are not counted.
|
||||
- `queryUsed` — The exact query JSON that was executed, mirroring what was written to the audit log for GoBD proof of scope.
|
||||
|
||||
#### Response Codes
|
||||
|
||||
- **`200 OK`** — Operation completed. Returns `emailsLinked: 0` if no new emails matched.
|
||||
- **`404 Not Found`** — Hold with the given ID does not exist.
|
||||
- **`409 Conflict`** — The hold is inactive. Only active holds can receive new email links.
|
||||
- **`422 Unprocessable Entity`** — Invalid request body.
|
||||
|
||||
---
|
||||
|
||||
### Release All Emails from Hold
|
||||
|
||||
Removes all `email_legal_holds` associations for the given hold in a single operation. The hold itself is **not** deleted.
|
||||
|
||||
- **Endpoint:** `POST /holds/:id/release-all`
|
||||
- **Method:** `POST`
|
||||
- **Authentication:** Required
|
||||
- **Permission:** `manage:all`
|
||||
|
||||
#### Path Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ------ | -------------------------------- |
|
||||
| `id` | `uuid` | The UUID of the hold to release. |
|
||||
|
||||
#### Response Body
|
||||
|
||||
```json
|
||||
{
|
||||
"emailsReleased": 4821
|
||||
}
|
||||
```
|
||||
|
||||
#### Response Codes
|
||||
|
||||
- **`200 OK`** — All email associations removed. Returns `emailsReleased: 0` if the hold had no linked emails.
|
||||
- **`500 Internal Server Error`** — The hold ID was not found or a database error occurred.
|
||||
|
||||
> **Warning:** After release, emails that were solely protected by this hold will be evaluated normally on the next lifecycle worker cycle. Emails with expired retention periods will be deleted.
|
||||
|
||||
---
|
||||
|
||||
## Per-Email Hold Endpoints
|
||||
|
||||
### Get Holds Applied to an Email
|
||||
|
||||
Returns all legal holds currently linked to a specific archived email, including both active and inactive holds.
|
||||
|
||||
- **Endpoint:** `GET /email/:emailId/holds`
|
||||
- **Method:** `GET`
|
||||
- **Authentication:** Required
|
||||
- **Permission:** `read:archive`
|
||||
|
||||
#### Path Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ------ | ------------------------------- |
|
||||
| `emailId` | `uuid` | The UUID of the archived email. |
|
||||
|
||||
#### Response Body
|
||||
|
||||
Returns an empty array `[]` if no holds are applied, or an array of hold-link objects:
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"legalHoldId": "a1b2c3d4-e5f6-7890-abcd-ef1234567890",
|
||||
"holdName": "Project Titan Litigation — 2026",
|
||||
"isActive": true,
|
||||
"appliedAt": "2026-01-15T11:00:00.000Z",
|
||||
"appliedByUserId": "user-uuid-here"
|
||||
},
|
||||
{
|
||||
"legalHoldId": "b2c3d4e5-f6a7-8901-bcde-f23456789012",
|
||||
"holdName": "SEC Investigation Q3 2025",
|
||||
"isActive": false,
|
||||
"appliedAt": "2025-09-05T09:15:00.000Z",
|
||||
"appliedByUserId": null
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
#### Response Codes
|
||||
|
||||
- **`200 OK`** — Returns the array of hold-link objects (may be empty).
|
||||
|
||||
---
|
||||
|
||||
### Apply a Hold to a Specific Email
|
||||
|
||||
Links a single archived email to an active legal hold. The operation is idempotent — linking the same email to the same hold twice has no effect.
|
||||
|
||||
- **Endpoint:** `POST /email/:emailId/holds`
|
||||
- **Method:** `POST`
|
||||
- **Authentication:** Required
|
||||
- **Permission:** `manage:all`
|
||||
|
||||
#### Path Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ------ | ------------------------------- |
|
||||
| `emailId` | `uuid` | The UUID of the archived email. |
|
||||
|
||||
#### Request Body
|
||||
|
||||
| Field | Type | Required | Description |
|
||||
| -------- | ------ | -------- | ------------------------------ |
|
||||
| `holdId` | `uuid` | Yes | The UUID of the hold to apply. |
|
||||
|
||||
#### Example Request
|
||||
|
||||
```json
|
||||
{
|
||||
"holdId": "a1b2c3d4-e5f6-7890-abcd-ef1234567890"
|
||||
}
|
||||
```
|
||||
|
||||
#### Response Body
|
||||
|
||||
Returns the hold-link object with the DB-authoritative `appliedAt` timestamp:
|
||||
|
||||
```json
|
||||
{
|
||||
"legalHoldId": "a1b2c3d4-e5f6-7890-abcd-ef1234567890",
|
||||
"holdName": "Project Titan Litigation — 2026",
|
||||
"isActive": true,
|
||||
"appliedAt": "2026-01-16T14:22:00.000Z",
|
||||
"appliedByUserId": "user-uuid-here"
|
||||
}
|
||||
```
|
||||
|
||||
#### Response Codes
|
||||
|
||||
- **`200 OK`** — Hold successfully applied (or was already applied — idempotent).
|
||||
- **`404 Not Found`** — Email or hold not found.
|
||||
- **`409 Conflict`** — The hold is inactive and cannot be applied to new emails.
|
||||
- **`422 Unprocessable Entity`** — Invalid request body.
|
||||
|
||||
---
|
||||
|
||||
### Remove a Hold from a Specific Email
|
||||
|
||||
Unlinks a specific legal hold from a specific archived email. The hold itself is not modified; other emails linked to the same hold are unaffected.
|
||||
|
||||
- **Endpoint:** `DELETE /email/:emailId/holds/:holdId`
|
||||
- **Method:** `DELETE`
|
||||
- **Authentication:** Required
|
||||
- **Permission:** `manage:all`
|
||||
|
||||
#### Path Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ------ | ------------------------------- |
|
||||
| `emailId` | `uuid` | The UUID of the archived email. |
|
||||
| `holdId` | `uuid` | The UUID of the hold to remove. |
|
||||
|
||||
#### Response Body
|
||||
|
||||
```json
|
||||
{
|
||||
"message": "Hold removed from email successfully."
|
||||
}
|
||||
```
|
||||
|
||||
#### Response Codes
|
||||
|
||||
- **`200 OK`** — Hold link removed.
|
||||
- **`404 Not Found`** — No such hold was applied to this email.
|
||||
|
||||
---
|
||||
|
||||
## Error Responses
|
||||
|
||||
All endpoints use the standard error response format:
|
||||
|
||||
```json
|
||||
{
|
||||
"status": "error",
|
||||
"statusCode": 409,
|
||||
"message": "Cannot delete an active legal hold. Deactivate it first to explicitly lift legal protection before deletion.",
|
||||
"errors": null
|
||||
}
|
||||
```
|
||||
|
||||
For validation errors (`422 Unprocessable Entity`):
|
||||
|
||||
```json
|
||||
{
|
||||
"status": "error",
|
||||
"statusCode": 422,
|
||||
"message": "Invalid input provided.",
|
||||
"errors": [
|
||||
{
|
||||
"field": "name",
|
||||
"message": "Name is required."
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Validation Constraints
|
||||
|
||||
| Field | Constraint |
|
||||
| ------------------ | ---------------------------------------- |
|
||||
| Hold name | 1–255 characters. |
|
||||
| Reason             | Max 2,000 characters.                    |
|
||||
| `caseId` | Must be a valid UUID if provided. |
|
||||
| `holdId` | Must be a valid UUID. |
|
||||
| `emailId` | Must be a valid UUID. |
|
||||
| Search `query` | String (may be empty `""`). |
|
||||
| `matchingStrategy` | One of `"last"`, `"all"`, `"frequency"`. |
|
||||
164
docs/enterprise/legal-holds/guide.md
Normal file
164
docs/enterprise/legal-holds/guide.md
Normal file
@@ -0,0 +1,164 @@
|
||||
# Legal Holds: User Interface Guide
|
||||
|
||||
The legal holds management interface is located at **Dashboard → Compliance → Legal Holds**. It provides a complete view of all configured holds and tools for creating, applying, releasing, and deactivating them. Per-email hold controls are also available on each archived email's detail page.
|
||||
|
||||
## Overview
|
||||
|
||||
Legal holds suspend all automated and manual deletion for specific emails, regardless of any retention labels or policies that might otherwise govern them. They are the highest-priority mechanism in the data lifecycle and are intended for use by compliance officers and legal counsel responding to litigation, investigations, or audit requests.
|
||||
|
||||
## Holds Table
|
||||
|
||||
The main page displays a table of all legal holds with the following columns:
|
||||
|
||||
- **Name:** The hold name and its UUID displayed underneath for reference.
|
||||
- **Reason:** A short excerpt of the hold's reason/description. Shows _"No reason provided"_ if omitted.
|
||||
- **Emails:** A badge showing how many archived emails are currently linked to this hold.
|
||||
- **Status:** A badge indicating whether the hold is:
|
||||
- **Active** (red badge): The hold is currently granting deletion immunity to linked emails.
|
||||
- **Inactive** (gray badge): The hold is deactivated; linked emails are no longer immune.
|
||||
- **Created At:** The date the hold was created, in local date format.
|
||||
- **Actions:** Dropdown menu with options depending on the hold's state (see below).
|
||||
|
||||
The table is sorted by creation date in ascending order.
|
||||
|
||||
## Creating a Hold
|
||||
|
||||
Click the **"Create New"** button above the table to open the creation dialog. New holds are always created in the **Active** state.
|
||||
|
||||
### Form Fields
|
||||
|
||||
- **Name** (Required): A unique, descriptive name. Maximum 255 characters.
|
||||
Examples: `"Project Titan Litigation — 2026"`, `"SEC Investigation Q3 2025"`
|
||||
- **Reason** (Optional): A free-text description of the legal basis for the hold. Maximum 2,000 characters. This appears in the audit log and is visible to other compliance officers.
|
||||
|
||||
### After Creation
|
||||
|
||||
The hold immediately becomes active. No emails are linked to it yet — use Bulk Apply or the individual email detail page to add emails.
|
||||
|
||||
## Editing a Hold
|
||||
|
||||
Click **Edit** from the actions dropdown to modify the hold's name or reason. The `isActive` state is changed separately via the **Activate / Deactivate** action.
|
||||
|
||||
## Activating and Deactivating a Hold
|
||||
|
||||
The **Deactivate** / **Activate** option appears inline in the actions dropdown. Changing the active state does not remove any email links — it only determines whether those links grant deletion immunity.
|
||||
|
||||
> **Important:** Deactivating a hold means that all emails linked _solely_ to this hold lose their deletion immunity immediately. If any such emails have an expired retention period, they will be permanently deleted on the very next lifecycle worker cycle.
|
||||
|
||||
## Deleting a Hold
|
||||
|
||||
A hold **cannot be deleted while it is active**. Attempting to delete an active hold returns a `409 Conflict` error with the message: _"Cannot delete an active legal hold. Deactivate it first..."_
|
||||
|
||||
To delete a hold:
|
||||
|
||||
1. **Deactivate** it first using the Activate/Deactivate action.
|
||||
2. Click **Delete** from the actions dropdown.
|
||||
3. Confirm in the dialog.
|
||||
|
||||
Deletion permanently removes the hold record and, via database CASCADE, all `email_legal_holds` link rows. The emails themselves are not deleted — they simply lose the protection that this hold was providing. Any other active holds on those emails continue to protect them.
|
||||
|
||||
## Bulk Apply
|
||||
|
||||
The **Bulk Apply** option (available only on active holds) opens a search dialog that lets you cast a preservation net across potentially thousands of emails in a single operation.
|
||||
|
||||
### Search Fields
|
||||
|
||||
- **Full-text query:** Keywords to match against email subject, body, and attachment content. This uses Meilisearch's full-text engine with typo tolerance.
|
||||
- **From (sender):** Filter by sender email address.
|
||||
- **Start date / End date:** Filter by the date range of the email's `sentAt` field.
|
||||
|
||||
At least one of these fields must be filled before the **Apply Hold** button becomes enabled.
|
||||
|
||||
### What Happens During Bulk Apply
|
||||
|
||||
1. The system pages through all Meilisearch results matching the query (1,000 hits per page).
|
||||
2. Each hit's email ID is validated against the database to discard any stale index entries.
|
||||
3. New hold links are inserted in batches of 500. Emails already linked to this hold are skipped (idempotent).
|
||||
4. A success notification shows **how many emails were newly placed under the hold** (already-protected emails are not counted again).
|
||||
5. The exact search query JSON is written to the audit log as GoBD proof of the scope of protection.
|
||||
|
||||
> **Warning:** Bulk Apply is a wide-net operation. Review your query carefully — there is no per-email confirmation step. Use the search page first to preview results before applying.
|
||||
|
||||
### Bulk Apply and the Audit Log
|
||||
|
||||
The audit log entry for a bulk apply contains:
|
||||
|
||||
- `action: "BulkApplyHold"`
|
||||
- `searchQuery`: the exact JSON query used
|
||||
- `emailsLinked`: number of emails newly linked
|
||||
- `emailsAlreadyProtected`: number of emails that were already under this hold
|
||||
|
||||
## Release All Emails
|
||||
|
||||
The **Release All** option (available when the hold has at least one linked email) removes every `email_legal_holds` link for this hold in a single operation.
|
||||
|
||||
> **Warning:** This immediately lifts deletion immunity for all emails that were solely protected by this hold. Emails with expired retention periods will be deleted on the next lifecycle worker cycle.
|
||||
|
||||
A confirmation dialog is shown before the operation proceeds. On success, a notification reports how many email links were removed.
|
||||
|
||||
## Per-Email Hold Controls
|
||||
|
||||
### Viewing Holds on a Specific Email
|
||||
|
||||
On any archived email's detail page, the **Legal Holds** card lists all holds currently applied to that email, showing:
|
||||
|
||||
- Hold name and active/inactive badge
|
||||
- Date the hold was applied
|
||||
|
||||
### Applying a Hold to a Specific Email
|
||||
|
||||
In the Legal Holds card, a dropdown lists all currently **active** holds. Select a hold and click **Apply**. The operation is idempotent — applying the same hold twice has no effect.
|
||||
|
||||
### Removing a Hold from a Specific Email
|
||||
|
||||
Each linked hold in the card has a **Remove** button. Clicking it removes only the link between this email and that specific hold. The hold itself remains and continues to protect other emails.
|
||||
|
||||
> **Note:** Removing the last active hold from an email means the email is no longer immune. If its retention period has expired, it will be deleted on the next lifecycle worker cycle.
|
||||
|
||||
### Delete Button Behaviour Under a Hold
|
||||
|
||||
The **Delete Email** button on the email detail page is not disabled in the UI, but the backend will reject the request if the email is under an active hold. An error toast is displayed: _"Deletion blocked by retention policy (Legal Hold or similar)."_
|
||||
|
||||
## Permissions Reference
|
||||
|
||||
| Operation | Required Permission |
|
||||
| -------------------------------- | ------------------- |
|
||||
| View holds table | `manage:all` |
|
||||
| Create / edit / delete a hold | `manage:all` |
|
||||
| Activate / deactivate a hold | `manage:all` |
|
||||
| Bulk apply | `manage:all` |
|
||||
| Release all emails from a hold | `manage:all` |
|
||||
| View holds on a specific email | `read:archive` |
|
||||
| Apply / remove a hold from email | `manage:all` |
|
||||
|
||||
## Workflow: Responding to a Litigation Notice
|
||||
|
||||
1. **Receive the litigation notice.** Identify the relevant custodians, date range, and keywords.
|
||||
2. **Create a hold**: Navigate to Dashboard → Compliance → Legal Holds and click **Create New**. Name it descriptively (e.g., `"Doe v. Acme Corp — 2026"`). Add the legal matter reference as the reason.
|
||||
3. **Bulk apply**: Click **Bulk Apply** on the new hold. Enter keywords, the custodian's email address in the **From** field, and the relevant date range. Submit.
|
||||
4. **Verify**: Check the email count badge on the hold row. Review the audit log to confirm the search query was recorded.
|
||||
5. **Individual additions**: If specific emails not captured by the bulk query need to be preserved, open each email's detail page and apply the hold manually.
|
||||
6. **When the matter concludes**: Click **Deactivate** on the hold, then **Release All** to remove all email links, and finally **Delete** the hold record if desired.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Cannot Delete Hold — "Cannot delete an active legal hold"
|
||||
|
||||
**Cause:** The hold is still active.
|
||||
**Solution:** Use the **Deactivate** option from the actions dropdown first.
|
||||
|
||||
### Bulk Apply Returns 0 Emails
|
||||
|
||||
**Cause 1:** The search query matched no documents in the Meilisearch index.
|
||||
**Solution:** Verify the query in the main Search page to preview results before applying.
|
||||
**Cause 2:** All Meilisearch results were stale (emails deleted from the archive before this operation).
|
||||
**Solution:** This is a data state issue; the stale index entries will be cleaned up on the next index rebuild.
|
||||
|
||||
### Delete Email Returns an Error Instead of Deleting
|
||||
|
||||
**Cause:** The email is under one or more active legal holds.
|
||||
**Solution:** This is expected behavior. Deactivate or remove the hold(s) from this email before deleting.
|
||||
|
||||
### Hold Emails Count Shows 0 After Bulk Apply
|
||||
|
||||
**Cause:** The `emailCount` field is fetched when the page loads. If the bulk operation was just completed, refresh the page to see the updated count.
|
||||
125
docs/enterprise/legal-holds/index.md
Normal file
125
docs/enterprise/legal-holds/index.md
Normal file
@@ -0,0 +1,125 @@
|
||||
# Legal Holds
|
||||
|
||||
The Legal Holds feature is an enterprise-grade eDiscovery and compliance mechanism designed to prevent the spoliation (destruction) of evidence. It provides **absolute, unconditional immunity** from deletion for archived emails that are relevant to pending litigation, regulatory investigations, or audits.
|
||||
|
||||
## Core Principles
|
||||
|
||||
### 1. Absolute Immunity — Highest Precedence in the Lifecycle Pipeline
|
||||
|
||||
A legal hold is the final word on whether an email can be deleted. The [Lifecycle Worker](../retention-policy/lifecycle-worker.md) evaluates emails in a strict three-step precedence pipeline:
|
||||
|
||||
1. **Step 0 — Legal Hold** ← this feature
|
||||
2. Step 1 — Retention Label
|
||||
3. Step 2 — Retention Policy
|
||||
|
||||
If an email is linked to **at least one active** legal hold, the lifecycle worker immediately flags it as immune and stops evaluation. No retention label or policy can override this decision. The `RetentionHook` mechanism also blocks any **manual deletion** attempt from the UI — the backend will return an error before any `DELETE` SQL is issued.
|
||||
|
||||
### 2. Many-to-Many Relationship
|
||||
|
||||
A single email can be placed under multiple holds simultaneously (e.g., one hold for a litigation case and another for a regulatory investigation). The email remains immune as long as **any one** of those holds is active. Each hold-to-email link is recorded independently with its own `appliedAt` timestamp and actor attribution.
|
||||
|
||||
### 3. Active/Inactive State Management
|
||||
|
||||
Every hold has an `isActive` flag. When a legal matter concludes, the responsible officer deactivates the hold. The deactivation is instantaneous — on the very next lifecycle worker cycle, emails that were solely protected by that hold will be evaluated normally against retention labels and policies. If their retention period has already expired, they will be permanently deleted in that same cycle.
|
||||
|
||||
A hold **must be deactivated before it can be deleted**. This requirement forces an explicit, auditable act of lifting legal protection before the hold record can be removed from the system.
|
||||
|
||||
### 4. Bulk Preservation via Search Queries
|
||||
|
||||
The primary use case for legal holds is casting a wide preservation net quickly. The bulk-apply operation accepts a full Meilisearch query (full-text search + metadata filters such as sender, date range, etc.) and links every matching email to the hold in a single operation. The system pages through results in batches of 1,000 to handle datasets of any size without timing out the UI.
|
||||
|
||||
### 5. GoBD Audit Trail
|
||||
|
||||
Every action within the legal hold module — hold creation, modification, deactivation, deletion, email linkage, email removal, and bulk operations — is immutably recorded in the cryptographically chained `audit_logs` table. For bulk operations, the exact `SearchQuery` JSON used to cast the hold net is persisted in the audit log as proof of scope, satisfying GoBD and similar evidence-preservation requirements.
|
||||
|
||||
## Feature Requirements
|
||||
|
||||
The Legal Holds feature requires:
|
||||
|
||||
- An active **Enterprise license** with the `LEGAL_HOLDS` feature enabled.
|
||||
- The `manage:all` permission for all hold management and bulk operations.
|
||||
- The `read:archive` permission for viewing holds applied to a specific email.
|
||||
- The `manage:all` permission for applying or removing a hold from an individual email.
|
||||
|
||||
## Use Cases
|
||||
|
||||
### Active Litigation Hold
|
||||
|
||||
Upon receiving a litigation notice, a compliance officer creates a hold named "Project Titan Litigation — 2026", applies it via a bulk query scoped to a specific custodian's emails and a date range, and immediately freezes those records. The audit log provides timestamped proof that the hold was in place from the moment of creation.
|
||||
|
||||
### Regulatory Investigation
|
||||
|
||||
A regulator requests preservation of all finance-related communications from a specific period. The officer creates a hold and uses a keyword + date-range bulk query to capture every relevant email in seconds, regardless of which users sent or received them.
|
||||
|
||||
### Tax Audit
|
||||
|
||||
Before an annual audit window, an officer applies a hold to all emails matching tax-relevant keywords. The hold is released once the audit concludes, and standard retention policies resume.
|
||||
|
||||
### eDiscovery Case Management
|
||||
|
||||
Holds can optionally be linked to an `ediscovery_cases` record (`caseId` field) to organise multiple holds under a single legal matter. This allows all holds, emails, and audit events for a case to be referenced together.
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
| Component | Location | Description |
|
||||
| --------------- | ---------------------------------------------------------------------- | -------------------------------------------------------------- |
|
||||
| Types | `packages/types/src/retention.types.ts` | `LegalHold`, `EmailLegalHoldInfo`, `BulkApplyHoldResult` types |
|
||||
| Database Schema | `packages/backend/src/database/schema/compliance.ts` | `legal_holds` and `email_legal_holds` table definitions |
|
||||
| Service | `packages/enterprise/src/modules/legal-holds/LegalHoldService.ts` | All business logic for CRUD, linkage, and bulk operations |
|
||||
| Controller | `packages/enterprise/src/modules/legal-holds/legal-hold.controller.ts` | Express request handlers with Zod validation |
|
||||
| Routes | `packages/enterprise/src/modules/legal-holds/legal-hold.routes.ts` | Route registration with auth and feature guards |
|
||||
| Module | `packages/enterprise/src/modules/legal-holds/legal-hold.module.ts` | App-startup integration and `RetentionHook` registration |
|
||||
| Frontend Page | `packages/frontend/src/routes/dashboard/compliance/legal-holds/` | SvelteKit management page for holds |
|
||||
| Email Detail | `packages/frontend/src/routes/dashboard/archived-emails/[id]/` | Per-email hold card in the email detail view |
|
||||
| Lifecycle Guard | `packages/backend/src/hooks/RetentionHook.ts` | Static hook that blocks deletion if a hold is active |
|
||||
|
||||
## Data Model
|
||||
|
||||
### `legal_holds` Table
|
||||
|
||||
| Column | Type | Description |
|
||||
| ------------ | -------------- | --------------------------------------------------------------------------- |
|
||||
| `id` | `uuid` (PK) | Auto-generated unique identifier. |
|
||||
| `name` | `varchar(255)` | Human-readable hold name. |
|
||||
| `reason` | `text` | Optional description of why the hold was placed. |
|
||||
| `is_active` | `boolean` | Whether the hold currently grants immunity. Defaults to `true` on creation. |
|
||||
| `case_id` | `uuid` (FK) | Optional reference to an `ediscovery_cases` row. |
|
||||
| `created_at` | `timestamptz` | Hold creation timestamp. |
|
||||
| `updated_at` | `timestamptz` | Last modification timestamp. |
|
||||
|
||||
### `email_legal_holds` Join Table
|
||||
|
||||
| Column | Type | Description |
|
||||
| -------------------- | ------------- | ----------------------------------------------------------- |
|
||||
| `email_id` | `uuid` (FK) | Reference to `archived_emails.id`. Cascades on delete. |
|
||||
| `legal_hold_id` | `uuid` (FK) | Reference to `legal_holds.id`. Cascades on delete. |
|
||||
| `applied_at` | `timestamptz` | DB-server timestamp of when the link was created. |
|
||||
| `applied_by_user_id` | `uuid` (FK) | User who applied the hold (nullable for system operations). |
|
||||
|
||||
The table uses a composite primary key of `(email_id, legal_hold_id)`, enforcing uniqueness at the database level. Duplicate inserts use `ON CONFLICT DO NOTHING` for idempotency.
|
||||
|
||||
## Integration Points
|
||||
|
||||
### RetentionHook (Deletion Guard)
|
||||
|
||||
`LegalHoldModule.initialize()` registers an async check with `RetentionHook` at application startup. `ArchivedEmailService.deleteArchivedEmail()` calls `RetentionHook.canDelete(emailId)` before any storage or database DELETE. If the email is under an active hold, the hook returns `false` and deletion is aborted with a `400 Bad Request` error. This guard is fail-safe: if the hook itself throws an error, deletion is also blocked.
|
||||
|
||||
### Lifecycle Worker
|
||||
|
||||
The lifecycle worker calls `legalHoldService.isEmailUnderActiveHold(emailId)` as the first step in its per-email evaluation loop. Immune emails are skipped immediately with a `debug`-level log entry; no further evaluation occurs.
|
||||
|
||||
### Audit Log
|
||||
|
||||
All legal hold operations generate entries in `audit_logs`:
|
||||
|
||||
| Action | `actionType` | `targetType` | `targetId` |
|
||||
| --------------------------------- | ------------ | --------------- | -------------------- |
|
||||
| Hold created | `CREATE` | `LegalHold` | hold ID |
|
||||
| Hold updated / deactivated | `UPDATE` | `LegalHold` | hold ID |
|
||||
| Hold deleted | `DELETE` | `LegalHold` | hold ID |
|
||||
| Email linked to hold (individual) | `UPDATE` | `ArchivedEmail` | email ID |
|
||||
| Email unlinked from hold | `UPDATE` | `ArchivedEmail` | email ID |
|
||||
| Bulk apply via search | `UPDATE` | `LegalHold` | hold ID + query JSON |
|
||||
| All emails released from hold | `UPDATE` | `LegalHold` | hold ID |
|
||||
|
||||
Individual email link/unlink events target `ArchivedEmail` so that a per-email audit search surfaces the complete hold history for that email.
|
||||
360
docs/enterprise/retention-labels/api.md
Normal file
360
docs/enterprise/retention-labels/api.md
Normal file
@@ -0,0 +1,360 @@
|
||||
# Retention Labels: API Endpoints
|
||||
|
||||
The retention labels feature exposes a RESTful API for managing retention labels and applying them to individual archived emails. All endpoints require authentication and appropriate permissions as specified below.
|
||||
|
||||
**Base URL:** `/api/v1/enterprise/retention-policy`
|
||||
|
||||
All endpoints also require the `RETENTION_POLICY` feature to be enabled in the enterprise license.
|
||||
|
||||
---
|
||||
|
||||
## Label Management Endpoints
|
||||
|
||||
### List All Labels
|
||||
|
||||
Retrieves all retention labels, ordered by creation date ascending.
|
||||
|
||||
- **Endpoint:** `GET /labels`
|
||||
- **Method:** `GET`
|
||||
- **Authentication:** Required
|
||||
- **Permission:** `manage:all`
|
||||
|
||||
#### Response Body
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"id": "a1b2c3d4-e5f6-7890-abcd-ef1234567890",
|
||||
"name": "Legal Hold - Litigation ABC",
|
||||
"description": "Extended retention for emails related to litigation ABC vs Company",
|
||||
"retentionPeriodDays": 2555,
|
||||
"isDisabled": false,
|
||||
"createdAt": "2025-10-01T00:00:00.000Z"
|
||||
},
|
||||
{
|
||||
"id": "b2c3d4e5-f6a7-8901-bcde-f23456789012",
|
||||
"name": "Executive Communications",
|
||||
"description": null,
|
||||
"retentionPeriodDays": 3650,
|
||||
"isDisabled": true,
|
||||
"createdAt": "2025-09-15T12:30:00.000Z"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Get Label by ID
|
||||
|
||||
Retrieves a single retention label by its UUID.
|
||||
|
||||
- **Endpoint:** `GET /labels/:id`
|
||||
- **Method:** `GET`
|
||||
- **Authentication:** Required
|
||||
- **Permission:** `manage:all`
|
||||
|
||||
#### Path Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ------ | ----------------------------- |
|
||||
| `id` | `uuid` | The UUID of the label to get. |
|
||||
|
||||
#### Response Body
|
||||
|
||||
Returns a single label object (same shape as the list endpoint), or `404` if not found.
|
||||
|
||||
---
|
||||
|
||||
### Create Label
|
||||
|
||||
Creates a new retention label. The label name must be unique across the system.
|
||||
|
||||
- **Endpoint:** `POST /labels`
|
||||
- **Method:** `POST`
|
||||
- **Authentication:** Required
|
||||
- **Permission:** `manage:all`
|
||||
|
||||
#### Request Body
|
||||
|
||||
| Field | Type | Required | Description |
|
||||
| --------------------- | --------- | -------- | ----------------------------------------------------------- |
|
||||
| `name` | `string` | Yes | Unique label name. Max 255 characters. |
|
||||
| `description` | `string` | No | Human-readable description. Max 1000 characters. |
|
||||
| `retentionPeriodDays` | `integer` | Yes | Number of days to retain emails with this label. Minimum 1. |
|
||||
|
||||
#### Example Request
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "Financial Records - Q4 2025",
|
||||
"description": "Extended retention for Q4 2025 financial correspondence per regulatory requirements",
|
||||
"retentionPeriodDays": 2555
|
||||
}
|
||||
```
|
||||
|
||||
#### Response
|
||||
|
||||
- **`201 Created`** — Returns the created label object.
|
||||
- **`409 Conflict`** — A label with this name already exists.
|
||||
- **`422 Unprocessable Entity`** — Validation errors.
|
||||
|
||||
---
|
||||
|
||||
### Update Label
|
||||
|
||||
Updates an existing retention label. Only the fields included in the request body are modified.
|
||||
|
||||
- **Endpoint:** `PUT /labels/:id`
|
||||
- **Method:** `PUT`
|
||||
- **Authentication:** Required
|
||||
- **Permission:** `manage:all`
|
||||
|
||||
#### Path Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ------ | -------------------------------- |
|
||||
| `id` | `uuid` | The UUID of the label to update. |
|
||||
|
||||
#### Request Body
|
||||
|
||||
All fields from the create endpoint are accepted, and all are optional. Only provided fields are updated.
|
||||
|
||||
**Important:** The `retentionPeriodDays` field cannot be modified if the label is currently applied to any emails. Attempting to do so will return a `409 Conflict` error.
|
||||
|
||||
#### Example Request
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "Financial Records - Q4 2025 (Updated)",
|
||||
"description": "Updated description for Q4 2025 financial records retention"
|
||||
}
|
||||
```
|
||||
|
||||
#### Response
|
||||
|
||||
- **`200 OK`** — Returns the updated label object.
|
||||
- **`404 Not Found`** — Label with the given ID does not exist.
|
||||
- **`409 Conflict`** — Attempted to modify retention period while label is applied to emails.
|
||||
- **`422 Unprocessable Entity`** — Validation errors.
|
||||
|
||||
---
|
||||
|
||||
### Delete Label
|
||||
|
||||
Deletes or disables a retention label depending on its usage status.
|
||||
|
||||
- **Endpoint:** `DELETE /labels/:id`
|
||||
- **Method:** `DELETE`
|
||||
- **Authentication:** Required
|
||||
- **Permission:** `manage:all`
|
||||
|
||||
#### Path Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ------ | -------------------------------- |
|
||||
| `id` | `uuid` | The UUID of the label to delete. |
|
||||
|
||||
#### Deletion Logic
|
||||
|
||||
- **Hard Delete**: If the label has never been applied to any emails, it is permanently removed.
|
||||
- **Soft Disable**: If the label is currently applied to one or more emails, it is marked as `isDisabled = true` instead of being deleted. This preserves the retention clock for tagged emails while preventing new applications.
|
||||
|
||||
#### Response Body
|
||||
|
||||
```json
|
||||
{
|
||||
"action": "deleted"
|
||||
}
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
```json
|
||||
{
|
||||
"action": "disabled"
|
||||
}
|
||||
```
|
||||
|
||||
#### Response Codes
|
||||
|
||||
- **`200 OK`** — Label successfully deleted or disabled. Check the `action` field in the response body.
|
||||
- **`404 Not Found`** — Label with the given ID does not exist.
|
||||
|
||||
---
|
||||
|
||||
## Email Label Endpoints
|
||||
|
||||
### Get Email's Label
|
||||
|
||||
Retrieves the retention label currently applied to a specific archived email.
|
||||
|
||||
- **Endpoint:** `GET /email/:emailId/label`
|
||||
- **Method:** `GET`
|
||||
- **Authentication:** Required
|
||||
- **Permission:** `read:archive`
|
||||
|
||||
#### Path Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ------ | ------------------------------- |
|
||||
| `emailId` | `uuid` | The UUID of the archived email. |
|
||||
|
||||
#### Response Body
|
||||
|
||||
Returns `null` if no label is applied:
|
||||
|
||||
```json
|
||||
null
|
||||
```
|
||||
|
||||
Or the label information if a label is applied:
|
||||
|
||||
```json
|
||||
{
|
||||
"labelId": "a1b2c3d4-e5f6-7890-abcd-ef1234567890",
|
||||
"labelName": "Legal Hold - Litigation ABC",
|
||||
"retentionPeriodDays": 2555,
|
||||
"appliedAt": "2025-10-15T14:30:00.000Z",
|
||||
"appliedByUserId": "user123"
|
||||
}
|
||||
```
|
||||
|
||||
#### Response Codes
|
||||
|
||||
- **`200 OK`** — Returns label information or `null`.
|
||||
- **`500 Internal Server Error`** — Server error during processing.
|
||||
|
||||
---
|
||||
|
||||
### Apply Label to Email
|
||||
|
||||
Applies a retention label to an archived email. If the email already has a label, the existing label is replaced.
|
||||
|
||||
- **Endpoint:** `POST /email/:emailId/label`
|
||||
- **Method:** `POST`
|
||||
- **Authentication:** Required
|
||||
- **Permission:** `delete:archive`
|
||||
|
||||
#### Path Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ------ | ------------------------------- |
|
||||
| `emailId` | `uuid` | The UUID of the archived email. |
|
||||
|
||||
#### Request Body
|
||||
|
||||
| Field | Type | Required | Description |
|
||||
| --------- | ------ | -------- | ------------------------------- |
|
||||
| `labelId` | `uuid` | Yes | The UUID of the label to apply. |
|
||||
|
||||
#### Example Request
|
||||
|
||||
```json
|
||||
{
|
||||
"labelId": "a1b2c3d4-e5f6-7890-abcd-ef1234567890"
|
||||
}
|
||||
```
|
||||
|
||||
#### Response Body
|
||||
|
||||
```json
|
||||
{
|
||||
"labelId": "a1b2c3d4-e5f6-7890-abcd-ef1234567890",
|
||||
"labelName": "Legal Hold - Litigation ABC",
|
||||
"retentionPeriodDays": 2555,
|
||||
"appliedAt": "2025-10-15T14:30:00.000Z",
|
||||
"appliedByUserId": "user123"
|
||||
}
|
||||
```
|
||||
|
||||
#### Response Codes
|
||||
|
||||
- **`200 OK`** — Label successfully applied.
|
||||
- **`404 Not Found`** — Email or label not found.
|
||||
- **`409 Conflict`** — Attempted to apply a disabled label.
|
||||
- **`422 Unprocessable Entity`** — Invalid request body.
|
||||
|
||||
---
|
||||
|
||||
### Remove Label from Email
|
||||
|
||||
Removes the retention label from an archived email if one is applied.
|
||||
|
||||
- **Endpoint:** `DELETE /email/:emailId/label`
|
||||
- **Method:** `DELETE`
|
||||
- **Authentication:** Required
|
||||
- **Permission:** `delete:archive`
|
||||
|
||||
#### Path Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ------ | ------------------------------- |
|
||||
| `emailId` | `uuid` | The UUID of the archived email. |
|
||||
|
||||
#### Response Body
|
||||
|
||||
If a label was removed:
|
||||
|
||||
```json
|
||||
{
|
||||
"message": "Label removed successfully."
|
||||
}
|
||||
```
|
||||
|
||||
If no label was applied:
|
||||
|
||||
```json
|
||||
{
|
||||
"message": "No label was applied to this email."
|
||||
}
|
||||
```
|
||||
|
||||
#### Response Codes
|
||||
|
||||
- **`200 OK`** — Operation completed (regardless of whether a label was actually removed).
|
||||
- **`500 Internal Server Error`** — Server error during processing.
|
||||
|
||||
---
|
||||
|
||||
## Error Responses
|
||||
|
||||
All endpoints use the standard error response format:
|
||||
|
||||
```json
|
||||
{
|
||||
"status": "error",
|
||||
"statusCode": 404,
|
||||
"message": "The requested resource could not be found.",
|
||||
"errors": null
|
||||
}
|
||||
```
|
||||
|
||||
For validation errors (`422 Unprocessable Entity`):
|
||||
|
||||
```json
|
||||
{
|
||||
"status": "error",
|
||||
"statusCode": 422,
|
||||
"message": "Invalid input provided.",
|
||||
"errors": [
|
||||
{
|
||||
"field": "name",
|
||||
"message": "Name is required."
|
||||
},
|
||||
{
|
||||
"field": "retentionPeriodDays",
|
||||
"message": "Retention period must be at least 1 day."
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## Validation Constraints
|
||||
|
||||
| Field | Constraint |
|
||||
| ---------------- | --------------------------------- |
|
||||
| Label name | 1–255 characters, must be unique. |
|
||||
| Description | Max 1000 characters. |
|
||||
| Retention period | Positive integer (≥ 1 day). |
|
||||
| Label ID (UUID) | Must be a valid UUID format. |
|
||||
| Email ID (UUID) | Must be a valid UUID format. |
|
||||
267
docs/enterprise/retention-labels/automated-tagging.md
Normal file
267
docs/enterprise/retention-labels/automated-tagging.md
Normal file
@@ -0,0 +1,267 @@
|
||||
# Retention Labels: Automated Application Guide
|
||||
|
||||
This guide explains how to use the API to automatically apply retention labels to archived emails, enabling automated compliance and retention management workflows.
|
||||
|
||||
## Overview
|
||||
|
||||
Automated retention label application allows external systems and services to programmatically tag emails with appropriate retention labels based on content analysis, business rules, or regulatory requirements. This eliminates manual tagging for large volumes of emails while ensuring consistent retention policy enforcement.
|
||||
|
||||
## Common Use Cases
|
||||
|
||||
### 1. Financial Document Classification
|
||||
|
||||
**Scenario**: Automatically identify and tag financial documents (invoices, receipts, payment confirmations) with extended retention periods for regulatory compliance.
|
||||
|
||||
**Implementation**:
|
||||
|
||||
- Monitor newly ingested emails for financial keywords in subject lines or attachment names
|
||||
- Apply "Financial Records" label (typically 7+ years retention) to matching emails
|
||||
- Use content analysis to identify financial document types
|
||||
|
||||
### 2. Legal and Compliance Tagging
|
||||
|
||||
**Scenario**: Apply legal hold labels to emails related to ongoing litigation or regulatory investigations.
|
||||
|
||||
**Implementation**:
|
||||
|
||||
- Scan emails for legal-related keywords or specific case references
|
||||
- Tag emails from/to legal departments with "Legal Hold" labels
|
||||
- Apply extended retention periods to preserve evidence
|
||||
|
||||
### 3. Executive Communication Preservation
|
||||
|
||||
**Scenario**: Ensure important communications involving executive leadership are retained beyond standard policies.
|
||||
|
||||
**Implementation**:
|
||||
|
||||
- Identify emails from C-level executives (CEO, CFO, CTO, etc.)
|
||||
- Apply "Executive Communications" labels with extended retention
|
||||
- Preserve strategic business communications for historical reference
|
||||
|
||||
### 4. Data Classification Integration
|
||||
|
||||
**Scenario**: Integrate with existing data classification systems to apply retention labels based on content sensitivity.
|
||||
|
||||
**Implementation**:
|
||||
|
||||
- Use AI/ML classification results to determine retention requirements
|
||||
- Apply labels like "Confidential", "Public", or "Restricted" with appropriate retention periods
|
||||
- Automate compliance with data protection regulations
|
||||
|
||||
### 5. Project-Based Retention
|
||||
|
||||
**Scenario**: Apply specific retention periods to emails related to particular projects or contracts.
|
||||
|
||||
**Implementation**:
|
||||
|
||||
- Identify project-related emails using subject line patterns or participant lists
|
||||
- Tag with project-specific labels (e.g., "Project Alpha - 5 Year Retention")
|
||||
- Ensure project documentation meets contractual retention requirements
|
||||
|
||||
## API Workflow
|
||||
|
||||
### Step 1: Authentication Setup
|
||||
|
||||
Create an API key with appropriate permissions:
|
||||
|
||||
- Navigate to **Dashboard → Admin → Roles/Users**
|
||||
- Create a user with `read:archive` and `delete:archive` permissions (minimum required)
|
||||
- Generate an API key for the newly created user
|
||||
- Securely store the API key for use in automated systems
|
||||
|
||||
### Step 2: Identify Target Emails
|
||||
|
||||
Use the archived emails API to find emails that need labeling:
|
||||
|
||||
**Get Recent Emails**:
|
||||
|
||||
```
|
||||
GET /api/v1/archived-emails?limit=100&sort=archivedAt:desc
|
||||
```
|
||||
|
||||
**Search for Specific Emails**:
|
||||
|
||||
```
|
||||
GET /api/v1/archived-emails/search?query=subject:invoice&limit=50
|
||||
```
|
||||
|
||||
### Step 3: Check Current Label Status
|
||||
|
||||
Before applying a new label, verify the email's current state:
|
||||
|
||||
**Check Email Label**:
|
||||
|
||||
```
|
||||
GET /api/v1/enterprise/retention-policy/email/{emailId}/label
|
||||
```
|
||||
|
||||
This returns `null` if no label is applied, or the current label information if one exists.
|
||||
|
||||
### Step 4: Apply Retention Label
|
||||
|
||||
Apply the appropriate label to the email:
|
||||
|
||||
**Apply Label**:
|
||||
|
||||
```
|
||||
POST /api/v1/enterprise/retention-policy/email/{emailId}/label
|
||||
Content-Type: application/json
|
||||
|
||||
{
|
||||
"labelId": "your-label-uuid-here"
|
||||
}
|
||||
```
|
||||
|
||||
### Step 5: Verify Application
|
||||
|
||||
Confirm the label was successfully applied by checking the response or making another GET request.
|
||||
|
||||
## Label Management
|
||||
|
||||
### Getting Available Labels
|
||||
|
||||
List all available retention labels to identify which ones to use:
|
||||
|
||||
```
|
||||
GET /api/v1/enterprise/retention-policy/labels
|
||||
```
|
||||
|
||||
This returns all labels with their IDs, names, retention periods, and status (enabled/disabled).
|
||||
|
||||
### Label Selection Strategy
|
||||
|
||||
- **Pre-create labels** through the UI with appropriate names and retention periods
|
||||
- **Map business rules** to specific label IDs in your automation logic
|
||||
- **Cache label information** to avoid repeated API calls
|
||||
- **Handle disabled labels** gracefully (they cannot be applied to new emails)
|
||||
|
||||
## Implementation Patterns
|
||||
|
||||
### Pattern 1: Post-Ingestion Processing
|
||||
|
||||
Apply labels after emails have been fully ingested and indexed:
|
||||
|
||||
1. Monitor for newly ingested emails (via webhooks or polling)
|
||||
2. Analyze email content and metadata
|
||||
3. Determine appropriate retention label based on business rules
|
||||
4. Apply the label via API
|
||||
|
||||
### Pattern 2: Batch Processing
|
||||
|
||||
Process emails in scheduled batches:
|
||||
|
||||
1. Query for unlabeled emails periodically (daily/weekly)
|
||||
2. Process emails in manageable batches (50-100 emails)
|
||||
3. Apply classification logic and labels
|
||||
4. Log results for audit and monitoring
|
||||
|
||||
### Pattern 3: Event-Driven Tagging
|
||||
|
||||
React to specific events or triggers:
|
||||
|
||||
1. Receive notification of specific events (legal hold notice, project start, etc.)
|
||||
2. Search for relevant emails based on criteria
|
||||
3. Apply appropriate labels to all matching emails
|
||||
4. Document the mass labeling action
|
||||
|
||||
## Authentication and Security
|
||||
|
||||
### API Key Management
|
||||
|
||||
- **Use dedicated API keys** for automated systems (not user accounts)
|
||||
- **Assign minimal required permissions** (`delete:archive` for label application)
|
||||
- **Rotate API keys regularly** as part of security best practices
|
||||
- **Store keys securely** using environment variables or secret management systems
|
||||
|
||||
### Request Authentication
|
||||
|
||||
Include the API key in all requests:
|
||||
|
||||
```
|
||||
Authorization: Bearer your-api-key-here
|
||||
Content-Type: application/json
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
### Common Error Scenarios
|
||||
|
||||
- **404 Email Not Found**: The specified email ID doesn't exist
|
||||
- **404 Label Not Found**: The label ID is invalid or label has been deleted
|
||||
- **409 Conflict**: Attempting to apply a disabled label
|
||||
- **422 Validation Error**: Invalid request format or missing required fields
|
||||
|
||||
### Best Practices
|
||||
|
||||
- **Check response status codes** and handle errors appropriately
|
||||
- **Implement retry logic** for temporary failures (5xx errors)
|
||||
- **Log all operations** for audit trails and debugging
|
||||
- **Continue processing** other emails even if some fail
|
||||
|
||||
## Performance Considerations
|
||||
|
||||
### Rate Limiting
|
||||
|
||||
- **Process emails in batches** rather than individually when possible
|
||||
- **Add delays between API calls** to avoid overwhelming the server
|
||||
- **Monitor API response times** and adjust batch sizes accordingly
|
||||
|
||||
### Efficiency Tips
|
||||
|
||||
- **Cache label information** to reduce API calls
|
||||
- **Check existing labels** before applying new ones to avoid unnecessary operations
|
||||
- **Use search API** to filter emails rather than processing all emails
|
||||
- **Implement incremental processing** to handle only new or modified emails
|
||||
|
||||
## Monitoring and Auditing
|
||||
|
||||
### Logging Recommendations
|
||||
|
||||
- **Log all label applications** with email ID, label ID, and timestamp
|
||||
- **Track success/failure rates** for monitoring system health
|
||||
- **Record business rule matches** for compliance reporting
|
||||
|
||||
### Audit Trail
|
||||
|
||||
All automated label applications are recorded in the system audit log with:
|
||||
|
||||
- Actor identified as the API key name
|
||||
- Target email and applied label details
|
||||
- Timestamp of the operation
|
||||
|
||||
This ensures full traceability of automated retention decisions.
|
||||
|
||||
## Integration Examples
|
||||
|
||||
### Scenario: Invoice Processing System
|
||||
|
||||
1. **Trigger**: New email arrives with invoice attachment
|
||||
2. **Analysis**: System identifies invoice keywords or attachment types
|
||||
3. **Action**: Apply "Financial Records - 7 Year" label via API
|
||||
4. **Result**: Email retained for regulatory compliance period
|
||||
|
||||
### Scenario: Legal Hold Implementation
|
||||
|
||||
1. **Trigger**: Legal department issues hold notice for specific matter
|
||||
2. **Search**: Find all emails matching case criteria (participants, keywords, date range)
|
||||
3. **Action**: Apply "Legal Hold - Matter XYZ" label to all matching emails
|
||||
4. **Result**: All relevant emails preserved indefinitely
|
||||
|
||||
### Scenario: Data Classification Integration
|
||||
|
||||
1. **Trigger**: Content classification system processes new emails
|
||||
2. **Analysis**: ML system categorizes email as "Confidential Financial Data"
|
||||
3. **Mapping**: Business rules map category to "Financial Confidential - 10 Year" label
|
||||
4. **Action**: Apply label via API
|
||||
5. **Result**: Automatic compliance with data retention policies
|
||||
|
||||
## Getting Started
|
||||
|
||||
1. **Set up authentication** by creating an API key with appropriate permissions
|
||||
2. **Identify your use cases** and create corresponding retention labels through the UI
|
||||
3. **Test the API** with a few sample emails to understand the workflow
|
||||
4. **Implement your business logic** to identify which emails need which labels
|
||||
5. **Deploy your automation** with proper error handling and monitoring
|
||||
6. **Monitor results** and adjust your classification rules as needed
|
||||
|
||||
This automated approach ensures consistent retention policy enforcement while reducing manual administrative overhead.
|
||||
224
docs/enterprise/retention-labels/guide.md
Normal file
224
docs/enterprise/retention-labels/guide.md
Normal file
@@ -0,0 +1,224 @@
|
||||
# Retention Labels: User Interface Guide
|
||||
|
||||
The retention labels management interface is located at **Dashboard → Compliance → Retention Labels**. It provides a comprehensive view of all configured labels and tools for creating, editing, deleting, and applying labels to individual archived emails.
|
||||
|
||||
## Overview
|
||||
|
||||
Retention labels provide item-level retention control, allowing administrators to override normal retention policies for specific emails with custom retention periods. This is particularly useful for legal holds, regulatory compliance, and preserving important business communications.
|
||||
|
||||
## Labels Table
|
||||
|
||||
The main page displays a table of all retention labels with the following columns:
|
||||
|
||||
- **Name:** The label name and its UUID displayed underneath for reference. If a description is provided, it appears below the name in smaller text.
|
||||
- **Retention Period:** The number of days emails with this label are retained, displayed as "X days".
|
||||
- **Status:** A badge indicating whether the label is:
|
||||
- **Enabled** (green badge): The label can be applied to new emails
|
||||
- **Disabled** (gray badge): The label cannot be applied to new emails but continues to govern already-labeled emails
|
||||
- **Created At:** The date the label was created, displayed in local date format.
|
||||
- **Actions:** Dropdown menu with Edit and Delete options for each label.
|
||||
|
||||
The table is sorted by creation date in ascending order by default.
|
||||
|
||||
## Creating a Label
|
||||
|
||||
Click the **"Create New"** button (with plus icon) above the table to open the creation dialog.
|
||||
|
||||
### Form Fields
|
||||
|
||||
- **Name** (Required): A unique, descriptive name for the label. Maximum 255 characters.
|
||||
- **Description** (Optional): A detailed explanation of the label's purpose or usage. Maximum 1000 characters.
|
||||
- **Retention Period (Days)** (Required): The number of days to retain emails with this label. Must be at least 1 day.
|
||||
|
||||
### Example Labels
|
||||
|
||||
- **Name:** "Legal Hold - Project Alpha"
|
||||
**Description:** "Extended retention for emails related to ongoing litigation regarding Project Alpha intellectual property dispute"
|
||||
**Retention Period:** 3650 days (10 years)
|
||||
|
||||
- **Name:** "Executive Communications"
|
||||
**Description:** "Preserve important emails from C-level executives beyond normal retention periods"
|
||||
**Retention Period:** 2555 days (7 years)
|
||||
|
||||
- **Name:** "Financial Records Q4 2025"
|
||||
**Retention Period:** 2190 days (6 years)
|
||||
|
||||
### Success and Error Handling
|
||||
|
||||
- **Success**: The dialog closes and a green success notification appears confirming the label was created.
|
||||
- **Name Conflict**: If a label with the same name already exists, an error notification will display.
|
||||
- **Validation Errors**: Missing required fields or invalid values will show inline validation messages.
|
||||
|
||||
## Editing a Label
|
||||
|
||||
Click the **Edit** option from the actions dropdown on any label row to open the edit dialog.
|
||||
|
||||
### Editable Fields
|
||||
|
||||
- **Name**: Can always be modified (subject to uniqueness constraint)
|
||||
- **Description**: Can always be modified
|
||||
- **Retention Period**: Can only be modified if the label has never been applied to any emails
|
||||
|
||||
### Retention Period Restrictions
|
||||
|
||||
The edit dialog shows a warning message: "Retention period cannot be modified if this label is currently applied to emails." If you attempt to change the retention period for a label that's in use, the system will return a conflict error and display an appropriate error message.
|
||||
|
||||
This restriction prevents tampering with active retention schedules and ensures compliance integrity.
|
||||
|
||||
### Update Process
|
||||
|
||||
1. Modify the desired fields
|
||||
2. Click **Save** to submit changes
|
||||
3. The system validates the changes and updates the label
|
||||
4. A success notification confirms the update
|
||||
|
||||
## Deleting a Label
|
||||
|
||||
Click the **Delete** option from the actions dropdown to open the deletion confirmation dialog.
|
||||
|
||||
### Smart Deletion Behavior
|
||||
|
||||
The system uses intelligent deletion logic:
|
||||
|
||||
#### Hard Delete
|
||||
|
||||
If the label has **never been applied** to any emails:
|
||||
|
||||
- The label is permanently removed from the system
|
||||
- Success message: "Label deleted successfully"
|
||||
|
||||
#### Soft Disable
|
||||
|
||||
If the label is **currently applied** to one or more emails:
|
||||
|
||||
- The label is marked as "Disabled" instead of being deleted
|
||||
- The label remains in the table with a "Disabled" status badge
|
||||
- Existing emails keep their retention schedule based on this label
|
||||
- The label cannot be applied to new emails
|
||||
- Success message: "Label disabled successfully"
|
||||
|
||||
### Confirmation Dialog
|
||||
|
||||
The deletion dialog shows:
|
||||
|
||||
- **Title**: "Delete Retention Label"
|
||||
- **Description**: Explains that this action cannot be undone and may disable the label if it's in use
|
||||
- **Cancel** button to abort the operation
|
||||
- **Confirm** button to proceed with deletion
|
||||
|
||||
## Applying Labels to Emails
|
||||
|
||||
Retention labels can be applied to individual archived emails through the email detail pages.
|
||||
|
||||
### From Email Detail Page
|
||||
|
||||
1. Navigate to an archived email by clicking on it from search results or the archived emails list
|
||||
2. Look for the "Retention Label" section in the email metadata
|
||||
3. If no label is applied, you'll see an "Apply Label" button (requires `delete:archive` permission)
|
||||
4. If a label is already applied, you'll see:
|
||||
- The current label name and retention period
|
||||
- "Change Label" and "Remove Label" buttons
|
||||
|
||||
### Label Application Process
|
||||
|
||||
1. Click **"Apply Label"** or **"Change Label"**
|
||||
2. A dropdown or dialog shows all available (enabled) labels
|
||||
3. Select the desired label
|
||||
4. Confirm the application
|
||||
5. The system:
|
||||
- Removes any existing label from the email
|
||||
- Applies the new label
|
||||
- Records the action in the audit log
|
||||
- Updates the email's retention schedule
|
||||
|
||||
### One Label Per Email Rule
|
||||
|
||||
Each email can have at most one retention label. When you apply a new label to an email that already has a label, the previous label is automatically removed and replaced with the new one.
|
||||
|
||||
## Permissions Required
|
||||
|
||||
Different operations require different permission levels:
|
||||
|
||||
### Label Management
|
||||
|
||||
- **Create, Edit, Delete Labels**: Requires `manage:all` permission
|
||||
- **View Labels Table**: Requires `manage:all` permission
|
||||
|
||||
### Email Label Operations
|
||||
|
||||
- **View Email Labels**: Requires `read:archive` permission
|
||||
- **Apply/Remove Email Labels**: Requires `delete:archive` permission
|
||||
|
||||
## Status Indicators
|
||||
|
||||
### Enabled Labels (Green Badge)
|
||||
|
||||
- Can be applied to new emails
|
||||
- Appears in label selection dropdowns
|
||||
- Fully functional for all operations
|
||||
|
||||
### Disabled Labels (Gray Badge)
|
||||
|
||||
- Cannot be applied to new emails
|
||||
- Does not appear in label selection dropdowns
|
||||
- Continues to govern retention for already-labeled emails
|
||||
- Can still be viewed and its details examined
|
||||
- Results from attempting to delete a label that's currently in use
|
||||
|
||||
## Best Practices
|
||||
|
||||
### Naming Conventions
|
||||
|
||||
- Use descriptive names that indicate purpose: "Legal Hold - Case XYZ", "Executive - Q4 Review"
|
||||
- Include time periods or case references where relevant
|
||||
- Maintain consistent naming patterns across your organization
|
||||
|
||||
### Descriptions
|
||||
|
||||
- Always provide descriptions for complex or specialized labels
|
||||
- Include the business reason or legal requirement driving the retention period
|
||||
- Reference specific regulations, policies, or legal matters where applicable
|
||||
|
||||
### Retention Periods
|
||||
|
||||
- Consider your organization's legal and regulatory requirements
|
||||
- Common periods:
|
||||
- **3 years (1095 days)**: Standard business records
|
||||
- **7 years (2555 days)**: Financial and tax records
|
||||
- **10 years (3650 days)**: Legal holds and critical business documents
|
||||
- **Permanent retention**: Use very large numbers (e.g., 36500 days = 100 years)
|
||||
|
||||
### Label Lifecycle
|
||||
|
||||
- Review labels periodically to identify unused or obsolete labels
|
||||
- Disabled labels can accumulate over time - consider cleanup procedures
|
||||
- Document the purpose and expected lifecycle of each label for future administrators
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Cannot Edit Retention Period
|
||||
|
||||
**Problem**: Edit dialog shows retention period as locked or returns conflict error
|
||||
**Cause**: The label is currently applied to one or more emails
|
||||
**Solution**: Create a new label with the desired retention period instead of modifying the existing one
|
||||
|
||||
### Label Not Appearing in Email Application Dropdown
|
||||
|
||||
**Problem**: A label doesn't show up when trying to apply it to an email
|
||||
**Cause**: The label is disabled
|
||||
**Solution**: Check the labels table - disabled labels show a gray "Disabled" badge
|
||||
|
||||
### Cannot Delete Label
|
||||
|
||||
**Problem**: Deletion results in label being disabled instead of removed
|
||||
**Cause**: The label is currently applied to emails
|
||||
**Solution**: This is expected behavior to preserve retention integrity. The label can only be hard-deleted if it has never been used.
|
||||
|
||||
### Permission Denied Errors
|
||||
|
||||
**Problem**: Cannot access label management or apply labels to emails
|
||||
**Cause**: Insufficient permissions
|
||||
**Solution**: Contact your system administrator to verify you have the required permissions:
|
||||
|
||||
- `manage:all` for label management
|
||||
- `delete:archive` for email label operations
|
||||
117
docs/enterprise/retention-labels/index.md
Normal file
117
docs/enterprise/retention-labels/index.md
Normal file
@@ -0,0 +1,117 @@
|
||||
# Retention Labels
|
||||
|
||||
The Retention Labels feature is an enterprise-grade capability that provides item-level retention overrides for archived emails. Unlike retention policies which apply rules to groups of emails, retention labels are manually or programmatically applied to individual emails to override the normal retention lifecycle with specific retention periods.
|
||||
|
||||
## Core Principles
|
||||
|
||||
### 1. Item-Level Retention Override
|
||||
|
||||
Retention labels represent a specific, targeted retention requirement that takes precedence over any automated retention policies. When an email has a retention label applied, the label's `retentionPeriodDays` becomes the governing retention period for that email, regardless of what any retention policy would otherwise specify.
|
||||
|
||||
### 2. One Label Per Email
|
||||
|
||||
Each archived email can have at most one retention label applied at any time. Applying a new label to an email automatically removes any existing label, ensuring a clean, unambiguous retention state.
|
||||
|
||||
### 3. Deletion Behavior
|
||||
|
||||
Retention labels implement the following deletion logic:
|
||||
|
||||
- **Hard Delete**: If a label has never been applied to any emails, it can be completely removed from the system.
|
||||
- **Soft Disable**: If a label is currently applied to one or more emails, deletion attempts result in the label being marked as `isDisabled = true`. This keeps the label-email relations, but the retention label won't take effect.
|
||||
- **Delete Disabled Labels**: If a label is currently applied to one or more emails, and it is disabled, a deletion request will delete the label itself and all label-email relations (remove the label from emails it is tagged with).
|
||||
|
||||
### 4. Immutable Retention Period
|
||||
|
||||
Once a retention label has been applied to any email, its `retentionPeriodDays` value becomes immutable to prevent tampering with active retention schedules. Labels can only have their retention period modified while they have zero applications.
|
||||
|
||||
### 5. User Attribution and Audit Trail
|
||||
|
||||
Every label application and removal is attributed to a specific user and recorded in the [Audit Log](../audit-log/index.md). This includes both manual UI actions and automated API operations, ensuring complete traceability of retention decisions.
|
||||
|
||||
### 6. Lifecycle Integration
|
||||
|
||||
The [Lifecycle Worker](../retention-policy/lifecycle-worker.md) gives retention labels the highest priority during email evaluation. If an email has a retention label applied, the label's retention period is used instead of any matching retention policy rules.
|
||||
|
||||
## Feature Requirements
|
||||
|
||||
The Retention Labels feature requires:
|
||||
|
||||
- An active **Enterprise license** with the `RETENTION_POLICY` feature enabled.
|
||||
- The `manage:all` permission for administrative operations (creating, editing, deleting labels).
|
||||
- The `delete:archive` permission for applying and removing labels from individual emails.
|
||||
|
||||
## Use Cases
|
||||
|
||||
### Legal Hold Alternative
|
||||
|
||||
Retention labels can serve as a lightweight alternative to formal legal holds by applying extended retention periods (e.g., 10+ years) to specific emails related to litigation or investigation.
|
||||
|
||||
### Executive Communications
|
||||
|
||||
Apply extended retention to emails from or to executive leadership to ensure important business communications are preserved beyond normal retention periods.
|
||||
|
||||
### Regulatory Exceptions
|
||||
|
||||
Mark specific emails that must be retained for regulatory compliance (e.g., financial records, safety incidents) with appropriate retention periods regardless of general policy rules.
|
||||
|
||||
### Project-Specific Retention
|
||||
|
||||
Apply custom retention periods to emails related to specific projects, contracts, or business initiatives that have unique preservation requirements.
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
The feature is composed of the following components:
|
||||
|
||||
| Component | Location | Description |
|
||||
| ----------------- | -------------------------------------------------------------------------------- | -------------------------------------------------------- |
|
||||
| Types | `packages/types/src/retention.types.ts` | Shared TypeScript types for labels and email label info. |
|
||||
| Database Schema | `packages/backend/src/database/schema/compliance.ts` | Drizzle ORM table definitions for retention labels. |
|
||||
| Label Service | `packages/enterprise/src/modules/retention-policy/RetentionLabelService.ts` | CRUD operations and label application logic. |
|
||||
| API Controller | `packages/enterprise/src/modules/retention-policy/retention-label.controller.ts` | Express request handlers with Zod validation. |
|
||||
| API Routes | `packages/enterprise/src/modules/retention-policy/retention-policy.routes.ts` | Route registration with auth and feature guards. |
|
||||
| Frontend Page | `packages/frontend/src/routes/dashboard/compliance/retention-labels/` | SvelteKit page for label management. |
|
||||
| Email Integration | Individual archived email pages | Label application UI in email detail views. |
|
||||
|
||||
## Data Model
|
||||
|
||||
### Retention Labels Table
|
||||
|
||||
| Column | Type | Description |
|
||||
| ----------------------- | -------------- | ---------------------------------------------------------------- |
|
||||
| `id` | `uuid` (PK) | Auto-generated unique identifier. |
|
||||
| `name` | `varchar(255)` | Human-readable label name (unique constraint). |
|
||||
| `retention_period_days` | `integer` | Number of days to retain emails with this label. |
|
||||
| `description` | `text` | Optional description of the label's purpose. |
|
||||
| `is_disabled` | `boolean` | Whether the label is disabled (cannot be applied to new emails). |
|
||||
| `created_at` | `timestamptz` | Creation timestamp. |
|
||||
|
||||
### Email Label Applications Table
|
||||
|
||||
| Column | Type | Description |
|
||||
| -------------------- | ------------- | ------------------------------------------------------------- |
|
||||
| `email_id` | `uuid` (FK) | Reference to the archived email. |
|
||||
| `label_id` | `uuid` (FK) | Reference to the retention label. |
|
||||
| `applied_at` | `timestamptz` | Timestamp when the label was applied. |
|
||||
| `applied_by_user_id` | `uuid` (FK) | User who applied the label (nullable for API key operations). |
|
||||
|
||||
The table uses a composite primary key of `(email_id, label_id)`. The one-label-per-email rule is enforced at the application level: applying a new label first removes any existing label from the email.
|
||||
|
||||
## Integration Points
|
||||
|
||||
### Lifecycle Worker
|
||||
|
||||
The lifecycle worker queries the `email_retention_labels` table during email evaluation. If an email has a retention label applied, the label's `retentionPeriodDays` takes precedence over any retention policy evaluation.
|
||||
|
||||
### Audit Log
|
||||
|
||||
All retention label operations generate audit log entries:
|
||||
|
||||
- **Label Creation**: Action type `CREATE`, target type `RetentionLabel`
|
||||
- **Label Updates**: Action type `UPDATE`, target type `RetentionLabel`
|
||||
- **Label Deletion/Disabling**: Action type `DELETE` or `UPDATE`, target type `RetentionLabel`
|
||||
- **Label Application**: Action type `UPDATE`, target type `ArchivedEmail`, details include label information
|
||||
- **Label Removal**: Action type `UPDATE`, target type `ArchivedEmail`, details include removed label information
|
||||
|
||||
### Email Detail Pages
|
||||
|
||||
Individual archived email pages display any applied retention label and provide controls for users with appropriate permissions to apply or remove labels.
|
||||
---

<!-- New file: docs/enterprise/retention-policy/api.md (268 lines) -->
# Retention Policy: API Endpoints
|
||||
|
||||
The retention policy feature exposes a RESTful API for managing retention policies and simulating policy evaluation against email metadata. All endpoints require authentication and the `manage:all` permission.
|
||||
|
||||
**Base URL:** `/api/v1/enterprise/retention-policy`
|
||||
|
||||
All endpoints also require the `RETENTION_POLICY` feature to be enabled in the enterprise license.
|
||||
|
||||
---
|
||||
|
||||
## List All Policies
|
||||
|
||||
Retrieves all retention policies, ordered by priority ascending.
|
||||
|
||||
- **Endpoint:** `GET /policies`
|
||||
- **Method:** `GET`
|
||||
- **Authentication:** Required
|
||||
- **Permission:** `manage:all`
|
||||
|
||||
### Response Body
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"id": "a1b2c3d4-e5f6-7890-abcd-ef1234567890",
|
||||
"name": "Default 7-Year Retention",
|
||||
"description": "Retain all emails for 7 years per regulatory requirements.",
|
||||
"priority": 1,
|
||||
"conditions": null,
|
||||
"ingestionScope": null,
|
||||
"retentionPeriodDays": 2555,
|
||||
"isActive": true,
|
||||
"createdAt": "2025-10-01T00:00:00.000Z",
|
||||
"updatedAt": "2025-10-01T00:00:00.000Z"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Get Policy by ID
|
||||
|
||||
Retrieves a single retention policy by its UUID.
|
||||
|
||||
- **Endpoint:** `GET /policies/:id`
|
||||
- **Method:** `GET`
|
||||
- **Authentication:** Required
|
||||
- **Permission:** `manage:all`
|
||||
|
||||
### Path Parameters
|
||||
|
||||
| Parameter | Type | Description |
| --------- | ------ | ------------------------------ |
| `id` | `uuid` | The UUID of the policy to get. |
### Response Body
|
||||
|
||||
Returns a single policy object (same shape as the list endpoint), or `404` if not found.
|
||||
|
||||
---
|
||||
|
||||
## Create Policy
|
||||
|
||||
Creates a new retention policy. The policy name must be unique across the system.
|
||||
|
||||
- **Endpoint:** `POST /policies`
|
||||
- **Method:** `POST`
|
||||
- **Authentication:** Required
|
||||
- **Permission:** `manage:all`
|
||||
|
||||
### Request Body
|
||||
|
||||
| Field | Type | Required | Description |
| --------------------- | ------------------- | -------- | ---------------------------------------------------------------------------------------- |
| `name` | `string` | Yes | Unique policy name. Max 255 characters. |
| `description` | `string` | No | Human-readable description. Max 1000 characters. |
| `priority` | `integer` | Yes | Positive integer. Lower values indicate higher priority. |
| `retentionPeriodDays` | `integer` | Yes | Number of days to retain matching emails. Minimum 1. |
| `actionOnExpiry` | `string` | Yes | Action to take when the retention period expires. Currently only `"delete_permanently"`. |
| `isEnabled` | `boolean` | No | Whether the policy is active. Defaults to `true`. |
| `conditions` | `RuleGroup \| null` | No | Condition rules for targeting specific emails. `null` matches all emails. |
| `ingestionScope` | `string[] \| null` | No | Array of ingestion source UUIDs to scope the policy to. `null` applies to all sources. |
#### Conditions (RuleGroup) Schema
|
||||
|
||||
```json
{
	"logicalOperator": "AND",
	"rules": [
		{
			"field": "sender",
			"operator": "domain_match",
			"value": "example.com"
		},
		{
			"field": "subject",
			"operator": "contains",
			"value": "invoice"
		}
	]
}
```
|
||||
|
||||
**Supported fields:** `sender`, `recipient`, `subject`, `attachment_type`
|
||||
|
||||
**Supported operators:**
|
||||
|
||||
| Operator | Description |
| -------------- | ------------------------------------------------------------------- |
| `equals` | Exact case-insensitive match. |
| `not_equals` | Inverse of `equals`. |
| `contains` | Case-insensitive substring match. |
| `not_contains` | Inverse of `contains`. |
| `starts_with` | Case-insensitive prefix match. |
| `ends_with` | Case-insensitive suffix match. |
| `domain_match` | Matches when an email address ends with `@<value>`. |
| `regex_match` | ECMAScript regex (case-insensitive). Max pattern length: 200 chars. |
**Validation limits:**
|
||||
|
||||
- Maximum 50 rules per group.
|
||||
- Rule `value` must be between 1 and 500 characters.
|
||||
|
||||
### Example Request
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "Finance Department - 10 Year",
|
||||
"description": "Extended retention for finance-related correspondence.",
|
||||
"priority": 2,
|
||||
"retentionPeriodDays": 3650,
|
||||
"actionOnExpiry": "delete_permanently",
|
||||
"conditions": {
|
||||
"logicalOperator": "OR",
|
||||
"rules": [
|
||||
{
|
||||
"field": "sender",
|
||||
"operator": "domain_match",
|
||||
"value": "finance.acme.com"
|
||||
},
|
||||
{
|
||||
"field": "recipient",
|
||||
"operator": "domain_match",
|
||||
"value": "finance.acme.com"
|
||||
}
|
||||
]
|
||||
},
|
||||
"ingestionScope": ["b2c3d4e5-f6a7-8901-bcde-f23456789012"]
|
||||
}
|
||||
```
|
||||
|
||||
### Response
|
||||
|
||||
- **`201 Created`** — Returns the created policy object.
|
||||
- **`409 Conflict`** — A policy with this name already exists.
|
||||
- **`422 Unprocessable Entity`** — Validation errors.
|
||||
|
||||
---
|
||||
|
||||
## Update Policy
|
||||
|
||||
Updates an existing retention policy. Only the fields included in the request body are modified.
|
||||
|
||||
- **Endpoint:** `PUT /policies/:id`
|
||||
- **Method:** `PUT`
|
||||
- **Authentication:** Required
|
||||
- **Permission:** `manage:all`
|
||||
|
||||
### Path Parameters
|
||||
|
||||
| Parameter | Type | Description |
| --------- | ------ | --------------------------------- |
| `id` | `uuid` | The UUID of the policy to update. |
### Request Body
|
||||
|
||||
All fields from the create endpoint are accepted, and all are optional. Only provided fields are updated.
|
||||
|
||||
To clear conditions (make the policy match all emails), send `"conditions": null`.
|
||||
|
||||
To clear ingestion scope (make the policy apply to all sources), send `"ingestionScope": null`.
|
||||
|
||||
### Response
|
||||
|
||||
- **`200 OK`** — Returns the updated policy object.
|
||||
- **`404 Not Found`** — Policy with the given ID does not exist.
|
||||
- **`422 Unprocessable Entity`** — Validation errors.
|
||||
|
||||
---
|
||||
|
||||
## Delete Policy
|
||||
|
||||
Permanently deletes a retention policy. This action is irreversible.
|
||||
|
||||
- **Endpoint:** `DELETE /policies/:id`
|
||||
- **Method:** `DELETE`
|
||||
- **Authentication:** Required
|
||||
- **Permission:** `manage:all`
|
||||
|
||||
### Path Parameters
|
||||
|
||||
| Parameter | Type | Description |
| --------- | ------ | --------------------------------- |
| `id` | `uuid` | The UUID of the policy to delete. |
### Response
|
||||
|
||||
- **`204 No Content`** — Policy successfully deleted.
|
||||
- **`404 Not Found`** — Policy with the given ID does not exist.
|
||||
|
||||
---
|
||||
|
||||
## Evaluate Email (Policy Simulator)
|
||||
|
||||
Evaluates a set of email metadata against all active policies and returns the applicable retention period and matching policy IDs. This endpoint does not modify any data — it is a read-only simulation tool.
|
||||
|
||||
- **Endpoint:** `POST /policies/evaluate`
|
||||
- **Method:** `POST`
|
||||
- **Authentication:** Required
|
||||
- **Permission:** `manage:all`
|
||||
|
||||
### Request Body
|
||||
|
||||
| Field | Type | Required | Description |
| --------------------------------- | ---------- | -------- | ---------------------------------------------------------- |
| `emailMetadata.sender` | `string` | Yes | Sender email address. Max 500 characters. |
| `emailMetadata.recipients` | `string[]` | Yes | Recipient email addresses. Max 500 entries. |
| `emailMetadata.subject` | `string` | Yes | Email subject line. Max 2000 characters. |
| `emailMetadata.attachmentTypes` | `string[]` | Yes | File extensions (e.g., `[".pdf", ".xml"]`). Max 100. |
| `emailMetadata.ingestionSourceId` | `uuid` | No | Optional ingestion source UUID for scope-aware evaluation. |
### Example Request
|
||||
|
||||
```json
|
||||
{
|
||||
"emailMetadata": {
|
||||
"sender": "cfo@finance.acme.com",
|
||||
"recipients": ["legal@acme.com"],
|
||||
"subject": "Q4 Invoice Reconciliation",
|
||||
"attachmentTypes": [".pdf", ".xlsx"],
|
||||
"ingestionSourceId": "b2c3d4e5-f6a7-8901-bcde-f23456789012"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Response Body
|
||||
|
||||
```json
|
||||
{
|
||||
"appliedRetentionDays": 3650,
|
||||
"actionOnExpiry": "delete_permanently",
|
||||
"matchingPolicyIds": [
|
||||
"a1b2c3d4-e5f6-7890-abcd-ef1234567890",
|
||||
"c3d4e5f6-a7b8-9012-cdef-345678901234"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
| Field | Type | Description |
| ---------------------- | ---------- | ------------------------------------------------------------------------------------- |
| `appliedRetentionDays` | `integer` | The longest retention period from all matching policies. `0` means no policy matched. |
| `actionOnExpiry` | `string` | The action to take on expiry. Currently always `"delete_permanently"`. |
| `matchingPolicyIds` | `string[]` | UUIDs of all policies that matched the provided metadata. |
### Response Codes
|
||||
|
||||
- **`200 OK`** — Evaluation completed.
|
||||
- **`422 Unprocessable Entity`** — Validation errors in the request body.
|
||||
---

<!-- New file: docs/enterprise/retention-policy/guide.md (93 lines) -->
# Retention Policy: User Interface
|
||||
|
||||
The retention policy management interface is located at **Dashboard → Compliance → Retention Policies**. It provides a comprehensive view of all configured policies and tools for creating, editing, deleting, and simulating retention rules.
|
||||
|
||||
## Policy Table
|
||||
|
||||
The main page displays a table of all retention policies with the following columns:
|
||||
|
||||
- **Name:** The policy name and its UUID displayed underneath for reference.
|
||||
- **Priority:** The numeric priority value. Lower values indicate higher priority.
|
||||
- **Retention Period:** The number of days emails matching this policy are retained before expiry.
|
||||
- **Ingestion Scope:** Shows which ingestion sources the policy is restricted to. Displays "All ingestion sources" when the policy has no scope restriction, or individual source name badges when scoped.
|
||||
- **Conditions:** A summary of the rule group. Displays "No conditions (matches all emails)" for policies without conditions, or "N rule(s) (AND/OR)" for policies with conditions.
|
||||
- **Status:** A badge indicating whether the policy is Active or Inactive.
|
||||
- **Actions:** Edit and Delete buttons for each policy.
|
||||
|
||||
The table is sorted by policy priority by default.
|
||||
|
||||
## Creating a Policy
|
||||
|
||||
Click the **"Create Policy"** button above the table to open the creation dialog. The form contains the following sections:
|
||||
|
||||
### Basic Information
|
||||
|
||||
- **Policy Name:** A unique, descriptive name for the policy.
|
||||
- **Description:** An optional detailed description of the policy's purpose.
|
||||
- **Priority:** A positive integer determining evaluation order (lower = higher priority).
|
||||
- **Retention Period (Days):** The number of days to retain matching emails.
|
||||
|
||||
### Ingestion Scope
|
||||
|
||||
This section controls which ingestion sources the policy applies to:
|
||||
|
||||
- **"All ingestion sources" toggle:** When enabled, the policy applies to emails from all ingestion sources. This is the default.
|
||||
- **Per-source checkboxes:** When the "all" toggle is disabled, individual ingestion sources can be selected. Each source displays its name and provider type as a badge.
|
||||
|
||||
### Condition Rules
|
||||
|
||||
Conditions define which emails the policy targets. If no conditions are added, the policy matches all emails (within its ingestion scope).
|
||||
|
||||
- **Logical Operator:** Choose **AND** (all rules must match) or **OR** (any rule must match).
|
||||
- **Add Rule:** Each rule consists of:
|
||||
- **Field:** The email metadata field to evaluate (`sender`, `recipient`, `subject`, or `attachment_type`).
|
||||
- **Operator:** The comparison operator (see [Supported Operators](#supported-operators) below).
|
||||
- **Value:** The string value to compare against.
|
||||
- **Remove Rule:** Each rule has a remove button to delete it from the group.
|
||||
|
||||
### Supported Operators
|
||||
|
||||
| Operator | Display Name | Description |
| -------------- | ------------ | ---------------------------------------------------------------- |
| `equals` | Equals | Exact case-insensitive match. |
| `not_equals` | Not Equals | Inverse of equals. |
| `contains` | Contains | Case-insensitive substring match. |
| `not_contains` | Not Contains | Inverse of contains. |
| `starts_with` | Starts With | Case-insensitive prefix match. |
| `ends_with` | Ends With | Case-insensitive suffix match. |
| `domain_match` | Domain Match | Matches when an email address ends with `@<value>`. |
| `regex_match` | Regex Match | ECMAScript regular expression (case-insensitive, max 200 chars). |
### Policy Status
|
||||
|
||||
- **Enable Policy toggle:** Controls whether the policy is active immediately upon creation.
|
||||
|
||||
## Editing a Policy
|
||||
|
||||
Click the **Edit** button (pencil icon) on any policy row to open the edit dialog. The form is pre-populated with the policy's current values. All fields can be modified, and the same validation rules apply as during creation.
|
||||
|
||||
## Deleting a Policy
|
||||
|
||||
Click the **Delete** button (trash icon) on any policy row. A confirmation dialog appears to prevent accidental deletion. Deleting a policy is irreversible. Once deleted, the policy no longer affects the lifecycle worker's evaluation of emails.
|
||||
|
||||
## Policy Simulator
|
||||
|
||||
The **"Simulate Policy"** button opens a simulation tool that evaluates hypothetical email metadata against all active policies without making any changes.
|
||||
|
||||
### Simulator Input Fields
|
||||
|
||||
- **Sender Email:** The sender address to evaluate (e.g., `cfo@finance.acme.com`).
|
||||
- **Recipients:** A comma-separated list of recipient email addresses.
|
||||
- **Subject:** The email subject line.
|
||||
- **Attachment Types:** A comma-separated list of file extensions (e.g., `.pdf, .xlsx`).
|
||||
- **Ingestion Source:** An optional dropdown to select a specific ingestion source for scope-aware evaluation. Defaults to "All sources".
|
||||
|
||||
### Simulator Results
|
||||
|
||||
After submission, the simulator displays:
|
||||
|
||||
- **Applied Retention Period:** The longest retention period from all matching policies, displayed in days.
|
||||
- **Action on Expiry:** The action that would be taken when the retention period expires (currently always "Permanent Deletion").
|
||||
- **Matching Policies:** A list of all policy IDs (with their names) that matched the provided metadata. If no policies match, a message indicates that no matching policies were found.
|
||||
|
||||
The simulator is a safe, read-only tool intended for testing and verifying policy configurations before they affect live data.
|
||||
---

<!-- New file: docs/enterprise/retention-policy/index.md (55 lines) -->
# Retention Policy
|
||||
|
||||
The Retention Policy Engine is an enterprise-grade feature that automates the lifecycle management of archived emails. It enables organizations to define time-based retention rules that determine how long archived emails are kept before they are permanently deleted, ensuring compliance with data protection regulations and internal data governance policies.
|
||||
|
||||
## Core Principles
|
||||
|
||||
### 1. Policy-Based Automation
|
||||
|
||||
Email deletion is never arbitrary. Every deletion is governed by one or more explicitly configured retention policies that define the retention period in days, the conditions under which the policy applies, and the action to take when an email expires. The lifecycle worker processes emails in batches on a recurring schedule, ensuring continuous enforcement without manual intervention.
|
||||
|
||||
### 2. Condition-Based Targeting
|
||||
|
||||
Policies can target specific subsets of archived emails using a flexible condition builder. Conditions are evaluated against email metadata fields (sender, recipient, subject, attachment type) using a variety of string-matching operators. Conditions within a policy are grouped using AND/OR logic, allowing precise control over which emails a policy applies to.
|
||||
|
||||
### 3. Ingestion Scope
|
||||
|
||||
Each policy can optionally be scoped to one or more ingestion sources. When an ingestion scope is set, the policy only applies to emails that were archived from those specific sources. Policies with no ingestion scope (null) apply to all emails regardless of their source.
|
||||
|
||||
### 4. Priority and Max-Duration-Wins
|
||||
|
||||
When multiple policies match a single email, the system applies **max-duration-wins** logic: the longest matching retention period is used. This ensures that if any policy requires an email to be kept longer, that requirement is honored. The priority field on each policy provides an ordering mechanism for administrative purposes and future conflict-resolution enhancements.
|
||||
|
||||
### 5. Full Audit Trail
|
||||
|
||||
Every policy lifecycle event — creation, modification, deletion, and every automated email deletion — is recorded in the immutable [Audit Log](../audit-log/index.md). Automated deletions include the IDs of the governing policies in the audit log entry, ensuring full traceability from deletion back to the rule that triggered it.
|
||||
|
||||
### 6. Fail-Safe Behavior
|
||||
|
||||
The system is designed to err on the side of caution:
|
||||
|
||||
- If no policy matches an email, the email is **not** deleted.
|
||||
- If the lifecycle worker encounters an error processing a specific email, it logs the error and continues with the remaining emails in the batch.
|
||||
- Invalid regex patterns in `regex_match` rules are treated as non-matching rather than causing failures.
|
||||
|
||||
## Feature Requirements
|
||||
|
||||
The Retention Policy Engine requires:
|
||||
|
||||
- An active **Enterprise license** with the `RETENTION_POLICY` feature enabled.
|
||||
- The `manage:all` permission for the authenticated user to access the policy management API and UI.
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
The feature is composed of the following components:
|
||||
|
||||
| Component | Location | Description |
| ----------------- | --------------------------------------------------------------------------------- | ------------------------------------------------------------ |
| Types | `packages/types/src/retention.types.ts` | Shared TypeScript types for policies, rules, and evaluation. |
| Database Schema | `packages/backend/src/database/schema/compliance.ts` | Drizzle ORM table definition for `retention_policies`. |
| Retention Service | `packages/enterprise/src/modules/retention-policy/RetentionService.ts` | CRUD operations and the evaluation engine. |
| API Controller | `packages/enterprise/src/modules/retention-policy/retention-policy.controller.ts` | Express request handlers with Zod validation. |
| API Routes | `packages/enterprise/src/modules/retention-policy/retention-policy.routes.ts` | Route registration with auth and feature guards. |
| Module | `packages/enterprise/src/modules/retention-policy/retention-policy.module.ts` | Enterprise module bootstrap. |
| Lifecycle Worker | `packages/enterprise/src/workers/lifecycle.worker.ts` | BullMQ worker for automated retention enforcement. |
| Frontend Page | `packages/frontend/src/routes/dashboard/compliance/retention-policies/` | SvelteKit page for policy management and simulation. |
---

<!-- New file: docs/enterprise/retention-policy/lifecycle-worker.md (108 lines) -->
# Retention Policy: Lifecycle Worker
|
||||
|
||||
The lifecycle worker is the automated enforcement component of the retention policy engine. It runs as a BullMQ background worker that periodically scans all archived emails, evaluates them against active retention policies, and permanently deletes emails that have exceeded their retention period.
|
||||
|
||||
## Location
|
||||
|
||||
`packages/enterprise/src/workers/lifecycle.worker.ts`
|
||||
|
||||
## How It Works
|
||||
|
||||
### Scheduling
|
||||
|
||||
The lifecycle worker is registered as a repeatable BullMQ cron job on the `compliance-lifecycle` queue. It is scheduled to run daily at **02:00 UTC** by default. The cron schedule is configured via:
|
||||
|
||||
```typescript
repeat: {
	pattern: '0 2 * * *', // daily at 02:00 UTC
}
```
|
||||
|
||||
The `scheduleLifecycleJob()` function is called once during enterprise application startup to register the repeatable job with BullMQ.
|
||||
|
||||
### Batch Processing
|
||||
|
||||
To avoid loading the entire `archived_emails` table into memory, the worker processes emails in configurable batches:
|
||||
|
||||
1. **Batch size** is controlled by the `RETENTION_BATCH_SIZE` environment variable.
|
||||
2. Emails are ordered by `archivedAt` ascending.
|
||||
3. The worker iterates through batches using offset-based pagination until an empty batch is returned, indicating all emails have been processed.
|
||||
|
||||
### Per-Email Processing Flow
|
||||
|
||||
For each email in a batch, the worker:
|
||||
|
||||
1. **Extracts metadata:** Builds a `PolicyEvaluationRequest` from the email's database record:
|
||||
- `sender`: The sender email address.
|
||||
- `recipients`: All To, CC, and BCC recipient addresses.
|
||||
- `subject`: The email subject line.
|
||||
- `attachmentTypes`: File extensions (e.g., `.pdf`) extracted from attachment filenames via a join query.
|
||||
- `ingestionSourceId`: The UUID of the ingestion source that archived this email.
|
||||
|
||||
2. **Evaluates policies:** Passes the metadata to `RetentionService.evaluateEmail()`, which returns:
|
||||
- `appliedRetentionDays`: The longest matching retention period (0 if no policy matches).
|
||||
- `matchingPolicyIds`: UUIDs of all matching policies.
|
||||
|
||||
3. **Checks for expiry:**
|
||||
- If `appliedRetentionDays === 0`, no policy matched — the email is **skipped** (not deleted).
|
||||
- Otherwise, the email's age is calculated from its `sentAt` date.
|
||||
- If the age in days exceeds `appliedRetentionDays`, the email has expired.
|
||||
|
||||
4. **Deletes expired emails:** Calls `ArchivedEmailService.deleteArchivedEmail()` with:
|
||||
- `systemDelete: true` — Bypasses the `ENABLE_DELETION` configuration guard so retention enforcement always works regardless of that global setting.
|
||||
- `governingRule` — A string listing the matching policy IDs for the audit log entry (e.g., `"Policy IDs: abc-123, def-456"`).
|
||||
|
||||
5. **Logs the deletion:** A structured log entry records the email ID and its age in days.
|
||||
|
||||
### Error Handling
|
||||
|
||||
If processing a specific email fails (e.g., due to a database error or storage issue), the error is logged and the worker continues to the next email in the batch. This ensures that a single problematic email does not block the processing of the remaining emails.
|
||||
|
||||
If the entire job fails, BullMQ records the failure and the job ID and error are logged. Failed jobs are retained (up to 50) for debugging.
|
||||
|
||||
## System Actor
|
||||
|
||||
Automated deletions are attributed to a synthetic system actor in the audit log:
|
||||
|
||||
| Field | Value |
| -------- | ------------------------------- |
| ID | `system:lifecycle-worker` |
| Email | `system@open-archiver.internal` |
| Name | System Lifecycle Worker |
| Actor IP | `system` |
This well-known identifier can be filtered in the [Audit Log](../audit-log/index.md) to view all retention-based deletions.
|
||||
|
||||
## Audit Trail
|
||||
|
||||
Every email deleted by the lifecycle worker produces an audit log entry with:
|
||||
|
||||
- **Action type:** `DELETE`
|
||||
- **Target type:** `ArchivedEmail`
|
||||
- **Target ID:** The UUID of the deleted email
|
||||
- **Actor:** `system:lifecycle-worker`
|
||||
- **Details:** Includes `reason: "RetentionExpiration"` and `governingRule` listing the matching policy IDs
|
||||
|
||||
This ensures that every automated deletion is fully traceable back to the specific policies that triggered it.
|
||||
|
||||
## Configuration
|
||||
|
||||
| Environment Variable | Description | Default |
| ---------------------- | ------------------------------------------------ | ------- |
| `RETENTION_BATCH_SIZE` | Number of emails to process per batch iteration. | — |
## BullMQ Worker Settings
|
||||
|
||||
| Setting | Value | Description |
| ------------------ | ---------------------- | ------------------------------------------ |
| Queue name | `compliance-lifecycle` | The BullMQ queue name. |
| Job ID | `lifecycle-daily` | Stable job ID for the repeatable cron job. |
| `removeOnComplete` | Keep last 10 | Completed jobs retained for monitoring. |
| `removeOnFail` | Keep last 50 | Failed jobs retained for debugging. |
## Integration with Deletion Guard
|
||||
|
||||
The core `ArchivedEmailService.deleteArchivedEmail()` method includes a deletion guard controlled by the `ENABLE_DELETION` system setting. When called with `systemDelete: true`, the lifecycle worker bypasses this guard. This design ensures that:
|
||||
|
||||
- Manual user deletions can be disabled organization-wide via the system setting.
|
||||
- Automated retention enforcement always operates regardless of that setting, because retention compliance is a legal obligation that cannot be paused by a UI toggle.
|
||||
---

<!-- New file: docs/enterprise/retention-policy/retention-service.md (141 lines) -->
# Retention Policy: Backend Implementation
|
||||
|
||||
The backend implementation of the retention policy engine is handled by the `RetentionService`, located in `packages/enterprise/src/modules/retention-policy/RetentionService.ts`. This service encapsulates all CRUD operations for policies and the core evaluation engine that determines which policies apply to a given email.
|
||||
|
||||
## Database Schema
|
||||
|
||||
The `retention_policies` table is defined in `packages/backend/src/database/schema/compliance.ts` using Drizzle ORM:
|
||||
|
||||
| Column | Type | Description |
| ----------------------- | ------------------------- | ------------------------------------------------------------- |
| `id` | `uuid` (PK) | Auto-generated unique identifier. |
| `name` | `text` (unique, not null) | Human-readable policy name. |
| `description` | `text` | Optional description. |
| `priority` | `integer` (not null) | Priority for ordering. Lower = higher priority. |
| `retention_period_days` | `integer` (not null) | Number of days to retain matching emails. |
| `action_on_expiry` | `enum` (not null) | Action on expiry (`delete_permanently`). |
| `is_enabled` | `boolean` (default: true) | Whether the policy is active. |
| `conditions` | `jsonb` | Serialized `RetentionRuleGroup` or null (null = matches all). |
| `ingestion_scope` | `jsonb` | Array of ingestion source UUIDs or null (null = all sources). |
| `created_at` | `timestamptz` | Creation timestamp. |
| `updated_at` | `timestamptz` | Last update timestamp. |
## CRUD Operations
|
||||
|
||||
The `RetentionService` class provides the following methods:
|
||||
|
||||
### `createPolicy(data, actorId, actorIp)`
|
||||
|
||||
Inserts a new policy into the database and creates an audit log entry with action type `CREATE` and target type `RetentionPolicy`. The audit log details include the policy name, retention period, priority, action on expiry, and ingestion scope.
|
||||
|
||||
### `getPolicies()`
|
||||
|
||||
Returns all policies ordered by priority ascending. The raw database rows are mapped through `mapDbPolicyToType()`, which converts the DB column `isEnabled` to the shared type field `isActive` and normalizes date fields to ISO strings.
|
||||
|
||||
### `getPolicyById(id)`
|
||||
|
||||
Returns a single policy by UUID, or null if not found.
|
||||
|
||||
### `updatePolicy(id, data, actorId, actorIp)`
|
||||
|
||||
Partially updates a policy — only fields present in the DTO are modified. The `updatedAt` timestamp is always set to the current time. An audit log entry is created with action type `UPDATE`, recording which fields were changed.
|
||||
|
||||
Throws an error if the policy is not found.
|
||||
|
||||
### `deletePolicy(id, actorId, actorIp)`
|
||||
|
||||
Deletes a policy by UUID and creates an audit log entry with action type `DELETE`, recording the deleted policy's name. Returns `false` if the policy was not found.
|
||||
|
||||
## Evaluation Engine
|
||||
|
||||
The evaluation engine is the core logic that determines which policies apply to a given email. It is used by both the lifecycle worker (for automated enforcement) and the policy simulator endpoint (for testing).
|
||||
|
||||
### `evaluateEmail(metadata)`
|
||||
|
||||
This is the primary evaluation method. It accepts email metadata and returns:
|
||||
|
||||
- `appliedRetentionDays`: The longest matching retention period (max-duration-wins).
|
||||
- `matchingPolicyIds`: UUIDs of all policies that matched.
|
||||
- `actionOnExpiry`: Always `"delete_permanently"` in the current implementation.
|
||||
|
||||
The evaluation flow:
|
||||
|
||||
1. **Fetch active policies:** Queries all policies where `isEnabled = true`.
|
||||
2. **Ingestion scope check:** For each policy with a non-null `ingestionScope`, the email's `ingestionSourceId` must be included in the scope array. If not, the policy is skipped.
|
||||
3. **Condition evaluation:** If the policy has no conditions (`null`), it matches all emails within scope. Otherwise, the condition rule group is evaluated.
|
||||
4. **Max-duration-wins:** If multiple policies match, the longest `retentionPeriodDays` is used.
|
||||
5. **Zero means no match:** A return value of `appliedRetentionDays = 0` indicates no policy matched — the lifecycle worker will not delete the email.
|
||||
|
||||
### `_evaluateRuleGroup(group, metadata)`
|
||||
|
||||
Evaluates a `RetentionRuleGroup` using AND or OR logic:
|
||||
|
||||
- **AND:** Every rule in the group must pass.
|
||||
- **OR:** At least one rule must pass.
|
||||
- An empty rules array evaluates to `true`.
|
||||
|
||||
### `_evaluateRule(rule, metadata)`
|
||||
|
||||
Evaluates a single rule against the email metadata. All string comparisons are case-insensitive (both sides are lowercased before comparison). The behavior depends on the field:
|
||||
|
||||
| Field | Behavior |
| ----------------- | ------------------------------------------------------------------- |
| `sender` | Compares against the sender email address. |
| `recipient` | Passes if **any** recipient matches the operator. |
| `subject` | Compares against the email subject. |
| `attachment_type` | Passes if **any** attachment file extension matches (e.g., `.pdf`). |
### `_applyOperator(haystack, operator, needle)`
|
||||
|
||||
Applies a string-comparison operator between two pre-lowercased strings:
|
||||
|
||||
| Operator | Implementation |
| -------------- | ----------------------------------------------------------------------- |
| `equals` | `haystack === needle` |
| `not_equals` | `haystack !== needle` |
| `contains` | `haystack.includes(needle)` |
| `not_contains` | `!haystack.includes(needle)` |
| `starts_with` | `haystack.startsWith(needle)` |
| `ends_with` | `haystack.endsWith(needle)` |
| `domain_match` | `haystack.endsWith('@' + needle)` (auto-prepends `@` if missing) |
| `regex_match` | `new RegExp(needle, 'i').test(haystack)` with safety guards (see below) |
### Security: `regex_match` Safeguards
|
||||
|
||||
The `regex_match` operator includes protections against Regular Expression Denial of Service (ReDoS):
|
||||
|
||||
1. **Length limit:** Patterns exceeding 200 characters (`MAX_REGEX_LENGTH`) are rejected and treated as non-matching. A warning is logged.
|
||||
2. **Error handling:** Invalid regex syntax is caught in a try/catch block and treated as non-matching. A warning is logged.
|
||||
3. **Flags:** Only the case-insensitive flag (`i`) is used. Global and multiline flags are excluded to prevent stateful matching bugs.
|
||||
|
||||
## Request Validation
|
||||
|
||||
The `RetentionPolicyController` (`retention-policy.controller.ts`) validates all incoming requests using Zod schemas before passing data to the service:
|
||||
|
||||
| Constraint | Limit |
| --------------------------- | -------------------------------------------------------------- |
| Policy name | 1–255 characters. |
| Description | Max 1000 characters. |
| Priority | Positive integer (≥ 1). |
| Retention period | Positive integer (≥ 1 day). |
| Rules per group | Max 50. |
| Rule value | 1–500 characters. |
| Ingestion scope entries | Each must be a valid UUID. Empty arrays are coerced to `null`. |
| Evaluate — sender | Max 500 characters. |
| Evaluate — recipients | Max 500 entries, each max 500 characters. |
| Evaluate — subject | Max 2000 characters. |
| Evaluate — attachment types | Max 100 entries, each max 50 characters. |
## Module Registration
|
||||
|
||||
The `RetentionPolicyModule` (`retention-policy.module.ts`) implements the `ArchiverModule` interface and registers the API routes at:
|
||||
|
||||
```
|
||||
/{api.version}/enterprise/retention-policy
|
||||
```
|
||||
|
||||
All routes are protected by:
|
||||
|
||||
1. `requireAuth` — Ensures the request includes a valid authentication token.
|
||||
2. `featureEnabled(OpenArchiverFeature.RETENTION_POLICY)` — Ensures the enterprise license includes the retention policy feature.
|
||||
3. `requirePermission('manage', 'all')` — Ensures the user has administrative permissions.
|
||||
@@ -2,13 +2,13 @@
|
||||
|
||||
Welcome to Open Archiver! This guide will help you get started with setting up and using the platform.
|
||||
|
||||
## What is Open Archiver? 🛡️
|
||||
## What is Open Archiver?
|
||||
|
||||
**A secure, sovereign, and affordable open-source platform for email archiving and eDiscovery.**
|
||||
|
||||
Open Archiver provides a robust, self-hosted solution for archiving, storing, indexing, and searching emails from major platforms, including Google Workspace (Gmail), Microsoft 365, as well as generic IMAP-enabled email inboxes. Use Open Archiver to keep a permanent, tamper-proof record of your communication history, free from vendor lock-in.
|
||||
|
||||
## Key Features ✨
|
||||
## Key Features
|
||||
|
||||
- **Universal Ingestion**: Connect to Google Workspace, Microsoft 365, and standard IMAP servers to perform initial bulk imports and maintain continuous, real-time synchronization.
|
||||
- **Secure & Efficient Storage**: Emails are stored in the standard `.eml` format. The system uses deduplication and compression to minimize storage costs. All data is encrypted at rest.
|
||||
@@ -17,7 +17,7 @@ Open Archiver provides a robust, self-hosted solution for archiving, storing, in
|
||||
- **Compliance & Retention**: Define granular retention policies to automatically manage the lifecycle of your data. Place legal holds on communications to prevent deletion during litigation (TBD).
|
||||
- **Comprehensive Auditing**: An immutable audit trail logs all system activities, ensuring you have a clear record of who accessed what and when (TBD).
|
||||
|
||||
## Installation 🚀
|
||||
## Installation
|
||||
|
||||
To get your own instance of Open Archiver running, follow our detailed installation guide:
|
||||
|
||||
@@ -31,7 +31,7 @@ After deploying the application, you will need to configure one or more ingestio
|
||||
- [Connecting to Microsoft 365](./user-guides/email-providers/microsoft-365.md)
|
||||
- [Connecting to a Generic IMAP Server](./user-guides/email-providers/imap.md)
|
||||
|
||||
## Contributing ❤️
|
||||
## Contributing
|
||||
|
||||
We welcome contributions from the community!
|
||||
|
||||
|
||||
81
docs/services/job-queue.md
Normal file
81
docs/services/job-queue.md
Normal file
@@ -0,0 +1,81 @@
|
||||
# Job Queue Service
|
||||
|
||||
This document describes the architecture of the job queue system, including the sync cycle coordination mechanism and relevant configuration options.
|
||||
|
||||
## Architecture
|
||||
|
||||
The job queue system is built on [BullMQ](https://docs.bullmq.io/) backed by Redis (Valkey). Two worker processes run independently:
|
||||
|
||||
- **Ingestion worker** (`ingestion.worker.ts`) — processes the `ingestion` queue
|
||||
- **Indexing worker** (`indexing.worker.ts`) — processes the `indexing` queue
|
||||
|
||||
### Queues
|
||||
|
||||
| Queue | Jobs | Purpose |
|
||||
| ----------- | --------------------------------------------------------------------------------------------------------- | -------------------------------------- |
|
||||
| `ingestion` | `schedule-continuous-sync`, `continuous-sync`, `initial-import`, `process-mailbox`, `sync-cycle-finished` | Email ingestion and sync orchestration |
|
||||
| `indexing` | `index-email-batch` | Meilisearch document indexing |
|
||||
|
||||
### Job Flow
|
||||
|
||||
```
|
||||
[schedule-continuous-sync] (repeating cron)
|
||||
└→ [continuous-sync] (per ingestion source)
|
||||
└→ [process-mailbox] × N (one per user mailbox)
|
||||
└→ [index-email-batch] (batched, on indexing queue)
|
||||
└→ [sync-cycle-finished] (dispatched by the last mailbox job)
|
||||
```
|
||||
|
||||
For initial imports, `initial-import` triggers the same `process-mailbox` → `sync-cycle-finished` flow.
|
||||
|
||||
## Sync Cycle Coordination
|
||||
|
||||
Sync cycle completion (knowing when all mailboxes in a sync have finished) is coordinated via the `sync_sessions` PostgreSQL table rather than BullMQ's built-in flow/parent-child system.
|
||||
|
||||
**Why:** BullMQ's `FlowProducer` stores the entire parent/child relationship in Redis atomically. For large tenants with thousands of mailboxes, this creates large Redis writes and requires loading all child job return values into memory at once for aggregation.
|
||||
|
||||
**How it works:**
|
||||
|
||||
1. When `initial-import` or `continuous-sync` starts, it creates a `sync_sessions` row with `total_mailboxes = N`.
|
||||
2. Each `process-mailbox` job atomically increments `completed_mailboxes` or `failed_mailboxes` when it finishes, and merges its `SyncState` into `ingestion_sources.sync_state` using PostgreSQL's `||` jsonb operator.
|
||||
3. The job that brings `completed + failed` to equal `total` dispatches the `sync-cycle-finished` job.
|
||||
4. `sync-cycle-finished` reads the aggregated results from the session row and finalizes the source status.
|
||||
5. The session row is deleted after finalization.
|
||||
|
||||
### Session Heartbeat
|
||||
|
||||
Each `process-mailbox` job updates `last_activity_at` on the session every time it flushes an email batch to the indexing queue. This prevents the stale session detector from treating an actively processing large mailbox as stuck.
|
||||
|
||||
### Stale Session Detection
|
||||
|
||||
The `schedule-continuous-sync` job runs `SyncSessionService.cleanStaleSessions()` on every tick. A session is considered stale when `last_activity_at` has not been updated for 30 minutes, which indicates that the worker which created it most likely crashed before all mailbox jobs were enqueued or completed.
|
||||
|
||||
When a stale session is detected:
|
||||
|
||||
1. The associated ingestion source is set to `status: 'error'` with a descriptive message.
|
||||
2. The session row is deleted.
|
||||
3. On the next scheduler tick, the source is picked up as an `error` source and a new `continuous-sync` job is dispatched.
|
||||
|
||||
Already-ingested emails from the partial sync are preserved. The next sync skips them via duplicate detection (`checkDuplicate()`).
|
||||
|
||||
## Configuration
|
||||
|
||||
| Environment Variable | Default | Description |
|
||||
| ------------------------------ | ----------- | ----------------------------------------------------- |
|
||||
| `SYNC_FREQUENCY` | `* * * * *` | Cron pattern for continuous sync scheduling |
|
||||
| `INGESTION_WORKER_CONCURRENCY` | `5` | Number of `process-mailbox` jobs that run in parallel |
|
||||
| `MEILI_INDEXING_BATCH` | `500` | Number of emails per `index-email-batch` job |
|
||||
|
||||
### Tuning `INGESTION_WORKER_CONCURRENCY`
|
||||
|
||||
Each `process-mailbox` job holds at most one parsed email in memory at a time during the ingestion loop. At typical email sizes (~50KB average), memory pressure per concurrent job is low. Increase this value on servers with more RAM to process multiple mailboxes in parallel and reduce total sync time.
|
||||
|
||||
### Tuning `MEILI_INDEXING_BATCH`
|
||||
|
||||
Each `index-email-batch` job loads the `.eml` file and all attachments from storage into memory for text extraction before sending to Meilisearch. Reduce this value if the indexing worker experiences memory pressure on deployments with large attachments.
|
||||
|
||||
## Resilience
|
||||
|
||||
- **Job retries:** All jobs are configured with 5 retry attempts using exponential backoff (starting at 1 second). This handles transient API failures from email providers.
|
||||
- **Worker crash recovery:** BullMQ detects stalled jobs (no heartbeat within `lockDuration`) and re-queues them automatically. On retry, already-processed emails are skipped via `checkDuplicate()`.
|
||||
- **Partial sync recovery:** Stale session detection handles the case where a worker crashes mid-dispatch, leaving some mailboxes never enqueued. The source is reset to `error` and the next scheduler tick retries the full sync.
|
||||
@@ -31,12 +31,13 @@ archive.zip
|
||||
3. Select **EML Import** as the provider.
|
||||
4. Enter a name for the ingestion source.
|
||||
5. **Choose Import Method:**
|
||||
* **Upload File:** Click **Choose File** and select the zip archive containing your EML files. (Best for smaller archives)
|
||||
* **Local Path:** Enter the path to the zip file **inside the container**. (Best for large archives)
|
||||
- **Upload File:** Click **Choose File** and select the zip archive containing your EML files. (Best for smaller archives)
|
||||
- **Local Path:** Enter the path to the zip file **inside the container**. (Best for large archives)
|
||||
|
||||
> **Note on Local Path:** When using Docker, the "Local Path" is relative to the container's filesystem.
|
||||
> * **Recommended:** Place your zip file in a `temp` folder inside your configured storage directory (`STORAGE_LOCAL_ROOT_PATH`). This path is already mounted. For example, if your storage path is `/data`, put the file in `/data/temp/emails.zip` and enter `/data/temp/emails.zip` as the path.
|
||||
> * **Alternative:** Mount a separate volume in `docker-compose.yml` (e.g., `- /host/path:/container/path`) and use the container path.
|
||||
>
|
||||
> - **Recommended:** Place your zip file in a `temp` folder inside your configured storage directory (`STORAGE_LOCAL_ROOT_PATH`). This path is already mounted. For example, if your storage path is `/data`, put the file in `/data/temp/emails.zip` and enter `/data/temp/emails.zip` as the path.
|
||||
> - **Alternative:** Mount a separate volume in `docker-compose.yml` (e.g., `- /host/path:/container/path`) and use the container path.
|
||||
|
||||
6. Click the **Submit** button.
|
||||
|
||||
|
||||
@@ -10,3 +10,4 @@ Choose your provider from the list below to get started:
|
||||
- [EML Import](./eml.md)
|
||||
- [PST Import](./pst.md)
|
||||
- [Mbox Import](./mbox.md)
|
||||
- [Merging Ingestion Sources](./merging-sources.md)
|
||||
|
||||
@@ -18,12 +18,13 @@ Once you have your `.mbox` file, you can upload it to OpenArchiver through the w
|
||||
2. Click on the **New Ingestion** button.
|
||||
3. Select **Mbox** as the source type.
|
||||
4. **Choose Import Method:**
|
||||
* **Upload File:** Upload your `.mbox` file.
|
||||
* **Local Path:** Enter the path to the mbox file **inside the container**.
|
||||
- **Upload File:** Upload your `.mbox` file.
|
||||
- **Local Path:** Enter the path to the mbox file **inside the container**.
|
||||
|
||||
> **Note on Local Path:** When using Docker, the "Local Path" is relative to the container's filesystem.
|
||||
> * **Recommended:** Place your mbox file in a `temp` folder inside your configured storage directory (`STORAGE_LOCAL_ROOT_PATH`). This path is already mounted. For example, if your storage path is `/data`, put the file in `/data/temp/emails.mbox` and enter `/data/temp/emails.mbox` as the path.
|
||||
> * **Alternative:** Mount a separate volume in `docker-compose.yml` (e.g., `- /host/path:/container/path`) and use the container path.
|
||||
>
|
||||
> - **Recommended:** Place your mbox file in a `temp` folder inside your configured storage directory (`STORAGE_LOCAL_ROOT_PATH`). This path is already mounted. For example, if your storage path is `/data`, put the file in `/data/temp/emails.mbox` and enter `/data/temp/emails.mbox` as the path.
|
||||
> - **Alternative:** Mount a separate volume in `docker-compose.yml` (e.g., `- /host/path:/container/path`) and use the container path.
|
||||
|
||||
## 3. Folder Structure
|
||||
|
||||
|
||||
105
docs/user-guides/email-providers/merging-sources.md
Normal file
105
docs/user-guides/email-providers/merging-sources.md
Normal file
@@ -0,0 +1,105 @@
|
||||
# Merging Ingestion Sources
|
||||
|
||||
Merged ingestion groups let you combine multiple ingestion sources so that their emails appear unified in browsing, search, and thread views. This is useful when you want to pair a historical archive (for example, a PST or Mbox import) with a live connection, or when migrating between providers.
|
||||
|
||||
## Concepts
|
||||
|
||||
| Term | Definition |
|
||||
| ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Root source** | An ingestion source where no merge parent is set. Shown as the primary row in the Ingestions table. All emails in the group are physically owned by the root. |
|
||||
| **Child source** | An ingestion source merged into a root. Acts as a fetch assistant — it connects to the provider and retrieves emails, but all data is stored under the root source. |
|
||||
| **Group** | A root source and all its children. All emails from every member are stored under and owned by the root. |
|
||||
|
||||
The hierarchy is **flat** — only one level of nesting is supported. If you merge a source into a child, the system automatically redirects the relationship to the root.
|
||||
|
||||
## Root Ownership — How Storage and Data Work
|
||||
|
||||
This is the key design principle of merged sources:
|
||||
|
||||
> **Child sources are assistants. They fetch emails from their provider but never own any stored data. Every email ingested by a child is written to the root source's storage folder and assigned the root source's ID in the database.**
|
||||
|
||||
In practical terms:
|
||||
|
||||
- The storage path for every email belongs to the root: `openarchiver/{root-name}-{root-id}/emails/...`
|
||||
- Every `archived_emails` database row created by a child ingestion will have `ingestionSourceId` set to the **root's ID**, not the child's.
|
||||
- Attachments are also stored under the root's folder and scoped to the root's ID.
|
||||
- The root's **Preserve Original File** (GoBD compliance) setting is inherited by all children in the group. A child's own `preserveOriginalFile` setting is ignored during ingestion — only the root's setting applies.
|
||||
|
||||
This means browsing the root source's emails will show all emails from the entire group, including those fetched by child sources, without any extra configuration.
|
||||
|
||||
## When to Use Merged Sources
|
||||
|
||||
- **Historical + live**: Import a PST archive and merge it into an active IMAP or Google Workspace connection so historical and current emails appear in one unified mailbox.
|
||||
- **Provider migration**: Add a new Microsoft 365 connector and merge it with your existing Google Workspace connector during a cutover period.
|
||||
- **Backfill**: Import an Mbox export and merge it with a live connection to cover a gap in the archive.
|
||||
|
||||
## How to Merge a New Source Into an Existing One
|
||||
|
||||
Merging can only be configured **at creation time**.
|
||||
|
||||
1. Navigate to the **Ingestions** page.
|
||||
2. Click **Create New** to open the ingestion source form.
|
||||
3. Fill in the provider details as usual.
|
||||
4. Expand the **Advanced Options** section at the bottom of the form. This section is only visible when at least one ingestion source already exists.
|
||||
5. Check **Merge into existing ingestion** and select the target root source from the dropdown.
|
||||
6. Click **Submit**.
|
||||
|
||||
The new source will run its initial import normally. Once complete, its emails will appear alongside those of the root source — all stored under the root.
|
||||
|
||||
## How Emails Appear When Merged
|
||||
|
||||
When you browse archived emails for a root source, you see all emails in the group because they are all physically owned by the root. There is nothing to aggregate — the data is already unified at the storage and database level.
|
||||
|
||||
The same applies to search: filtering by a root source ID returns all emails in the group.
|
||||
|
||||
Threads also span the merge group. If a reply arrived via a different source than the original message, it still appears in the correct thread.
|
||||
|
||||
## How Syncing Works
|
||||
|
||||
Each source syncs **independently**. The scheduler picks up all sources with status `active` or `error`, regardless of whether they are merged.
|
||||
|
||||
- File-based imports (PST, EML, Mbox) finish with status `imported` and are never re-synced automatically.
|
||||
- Live sources (IMAP, Google Workspace, Microsoft 365) continue their normal sync cycle.
|
||||
|
||||
When you trigger **Force Sync** on a root source, the system also queues a sync for all non-file-based children that are currently `active` or `error`.
|
||||
|
||||
## Deduplication Across the Group
|
||||
|
||||
When ingesting emails, duplicate detection covers the **entire merge group**. If the same email (matched by its RFC 5322 `Message-ID` header or provider-specific ID) already exists anywhere in the group, it is skipped and not stored again.
|
||||
|
||||
## Preserve Original File (GoBD Compliance) and Merged Sources
|
||||
|
||||
The **Preserve Original File** setting on the root source governs the entire group. When this setting is enabled on the root:
|
||||
|
||||
- All emails ingested by child sources are also stored unmodified (raw EML, no attachment stripping).
|
||||
- The child's own `preserveOriginalFile` setting has no effect — the root's setting is always used.
|
||||
|
||||
This ensures consistent compliance behaviour across the group. If you require GoBD or SEC 17a-4 compliance for an entire merged group, enable **Preserve Original File** on the root source before adding any children.
|
||||
|
||||
## Editing Sources in a Group
|
||||
|
||||
Each source in a group can be edited independently. Expand the group row in the Ingestions table by clicking the chevron, then use the **⋮** actions menu on the specific source (root or child) you want to edit.
|
||||
|
||||
## Unmerging a Child Source
|
||||
|
||||
To detach a child from its group and make it standalone:
|
||||
|
||||
1. Expand the group row by clicking the chevron next to the root source name.
|
||||
2. Open the **⋮** actions menu on the child source.
|
||||
3. Click **Unmerge**.
|
||||
|
||||
The child becomes an independent root source. No email data is moved or deleted.
|
||||
|
||||
> **Note:** Because all emails fetched by the child were stored under the root source's ID, unmerging the child does not transfer those emails. Historical emails ingested while the source was a child remain owned by the root. Only new emails ingested after unmerging will be stored under the (now standalone) child.
|
||||
|
||||
## Deleting Sources in a Group
|
||||
|
||||
- **Deleting a root source** also deletes all its children: their configuration, together with every email, attachment, storage file, and search index entry owned by the root, is removed. Because all group emails are stored under the root, this effectively removes the entire group's archive.
|
||||
- **Deleting a child source** removes only the child's configuration and sync state. Emails already ingested by the child are stored under the root and are **not** deleted.
|
||||
|
||||
A warning is shown in the delete confirmation dialog when a root source has children.
|
||||
|
||||
## Known Limitations
|
||||
|
||||
- **Merging existing standalone sources is not supported.** You can only merge a source into a group at creation time. To merge two existing sources, you must delete one and recreate it with the merge target selected.
|
||||
- **Historical data from a child source before unmerging remains with the root.** If you unmerge a child, emails it previously ingested stay owned by the root and are not migrated to the child.
|
||||
@@ -16,12 +16,13 @@ To ensure a successful import, you should prepare your PST file according to the
|
||||
3. Select **PST Import** as the provider.
|
||||
4. Enter a name for the ingestion source.
|
||||
5. **Choose Import Method:**
|
||||
* **Upload File:** Click **Choose File** and select the PST file from your computer. (Best for smaller files)
|
||||
* **Local Path:** Enter the path to the PST file **inside the container**. (Best for large files)
|
||||
- **Upload File:** Click **Choose File** and select the PST file from your computer. (Best for smaller files)
|
||||
- **Local Path:** Enter the path to the PST file **inside the container**. (Best for large files)
|
||||
|
||||
> **Note on Local Path:** When using Docker, the "Local Path" is relative to the container's filesystem.
|
||||
> * **Recommended:** Place your file in a `temp` folder inside your configured storage directory (`STORAGE_LOCAL_ROOT_PATH`). This path is already mounted. For example, if your storage path is `/data`, put the file in `/data/temp/archive.pst` and enter `/data/temp/archive.pst` as the path.
|
||||
> * **Alternative:** Mount a separate volume in `docker-compose.yml` (e.g., `- /host/path:/container/path`) and use the container path.
|
||||
>
|
||||
> - **Recommended:** Place your file in a `temp` folder inside your configured storage directory (`STORAGE_LOCAL_ROOT_PATH`). This path is already mounted. For example, if your storage path is `/data`, put the file in `/data/temp/archive.pst` and enter `/data/temp/archive.pst` as the path.
|
||||
> - **Alternative:** Mount a separate volume in `docker-compose.yml` (e.g., `- /host/path:/container/path`) and use the container path.
|
||||
|
||||
6. Click the **Submit** button.
|
||||
|
||||
|
||||
@@ -24,11 +24,11 @@ Add the `MEILI_EXPERIMENTAL_DUMPLESS_UPGRADE` environment variable to your `dock
|
||||
|
||||
```yaml
|
||||
services:
|
||||
meilisearch:
|
||||
image: getmeili/meilisearch:v1.x # The new version you want to upgrade to
|
||||
environment:
|
||||
- MEILI_MASTER_KEY=${MEILI_MASTER_KEY}
|
||||
- MEILI_EXPERIMENTAL_DUMPLESS_UPGRADE=true
|
||||
meilisearch:
|
||||
image: getmeili/meilisearch:v1.x # The new version you want to upgrade to
|
||||
environment:
|
||||
- MEILI_MASTER_KEY=${MEILI_MASTER_KEY}
|
||||
- MEILI_EXPERIMENTAL_DUMPLESS_UPGRADE=true
|
||||
```
|
||||
|
||||
**Option 2: Using a CLI Option**
|
||||
@@ -37,9 +37,9 @@ Alternatively, you can pass the `--experimental-dumpless-upgrade` flag in the co
|
||||
|
||||
```yaml
|
||||
services:
|
||||
meilisearch:
|
||||
image: getmeili/meilisearch:v1.x # The new version you want to upgrade to
|
||||
command: meilisearch --experimental-dumpless-upgrade
|
||||
meilisearch:
|
||||
image: getmeili/meilisearch:v1.x # The new version you want to upgrade to
|
||||
command: meilisearch --experimental-dumpless-upgrade
|
||||
```
|
||||
|
||||
After updating your configuration, restart your container:
|
||||
|
||||
14
package.json
14
package.json
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "open-archiver",
|
||||
"version": "0.4.2",
|
||||
"version": "0.5.1",
|
||||
"private": true,
|
||||
"license": "SEE LICENSE IN LICENSE file",
|
||||
"scripts": {
|
||||
@@ -8,8 +8,8 @@
|
||||
"build:enterprise": "cross-env VITE_ENTERPRISE_MODE=true pnpm build",
|
||||
"start:oss": "dotenv -- concurrently \"node apps/open-archiver/dist/index.js\" \"pnpm --filter @open-archiver/frontend start\"",
|
||||
"start:enterprise": "dotenv -- concurrently \"node apps/open-archiver-enterprise/dist/index.js\" \"pnpm --filter @open-archiver/frontend start\"",
|
||||
"dev:enterprise": "cross-env VITE_ENTERPRISE_MODE=true dotenv -- pnpm --filter \"@open-archiver/*\" --filter \"open-archiver-enterprise-app\" --parallel dev",
|
||||
"dev:oss": "dotenv -- pnpm --filter \"./packages/*\" --filter \"!./packages/@open-archiver/enterprise\" --filter \"open-archiver-app\" --parallel dev",
|
||||
"dev:enterprise": "cross-env VITE_ENTERPRISE_MODE=true dotenv -- pnpm --filter \"@open-archiver/*\" --filter \"open-archiver-enterprise-app\" --parallel dev & pnpm run start:workers:dev",
|
||||
"dev:oss": "dotenv -- pnpm --filter \"./packages/*\" --filter \"!./packages/@open-archiver/enterprise\" --filter \"open-archiver-app\" --parallel dev & pnpm run start:workers:dev",
|
||||
"build": "pnpm --filter \"./packages/*\" --filter \"./apps/*\" build",
|
||||
"start": "dotenv -- pnpm --filter \"open-archiver-app\" --parallel start",
|
||||
"start:workers": "dotenv -- concurrently \"pnpm --filter @open-archiver/backend start:ingestion-worker\" \"pnpm --filter @open-archiver/backend start:indexing-worker\" \"pnpm --filter @open-archiver/backend start:sync-scheduler\"",
|
||||
@@ -19,8 +19,9 @@
|
||||
"db:migrate:dev": "dotenv -- pnpm --filter @open-archiver/backend db:migrate:dev",
|
||||
"docker-start:oss": "concurrently \"pnpm start:workers\" \"pnpm start:oss\"",
|
||||
"docker-start:enterprise": "concurrently \"pnpm start:workers\" \"pnpm start:enterprise\"",
|
||||
"docs:dev": "vitepress dev docs --port 3009",
|
||||
"docs:build": "vitepress build docs",
|
||||
"docs:gen-spec": "node packages/backend/scripts/generate-openapi-spec.mjs",
|
||||
"docs:dev": "pnpm docs:gen-spec && vitepress dev docs --port 3009",
|
||||
"docs:build": "pnpm docs:gen-spec && vitepress build docs",
|
||||
"docs:preview": "vitepress preview docs",
|
||||
"format": "prettier --write .",
|
||||
"lint": "prettier --check ."
|
||||
@@ -35,7 +36,8 @@
|
||||
"prettier-plugin-svelte": "^3.4.0",
|
||||
"prettier-plugin-tailwindcss": "^0.6.14",
|
||||
"typescript": "5.8.3",
|
||||
"vitepress": "^1.6.4"
|
||||
"vitepress": "^1.6.4",
|
||||
"vitepress-openapi": "^0.1.18"
|
||||
},
|
||||
"packageManager": "pnpm@10.13.1",
|
||||
"engines": {
|
||||
|
||||
@@ -5,6 +5,10 @@
|
||||
"license": "SEE LICENSE IN LICENSE file",
|
||||
"main": "dist/index.js",
|
||||
"types": "dist/index.d.ts",
|
||||
"exports": {
|
||||
".": "./dist/index.js",
|
||||
"./*": "./dist/*.js"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "tsc && pnpm copy-assets",
|
||||
"dev": "tsc --watch",
|
||||
@@ -52,6 +56,7 @@
|
||||
"mammoth": "^1.9.1",
|
||||
"meilisearch": "^0.51.0",
|
||||
"multer": "^2.0.2",
|
||||
"nodemailer": "^8.0.2",
|
||||
"pdf2json": "^3.1.6",
|
||||
"pg": "^8.16.3",
|
||||
"pino": "^9.7.0",
|
||||
@@ -73,7 +78,10 @@
|
||||
"@types/microsoft-graph": "^2.40.1",
|
||||
"@types/multer": "^2.0.0",
|
||||
"@types/node": "^24.0.12",
|
||||
"@types/nodemailer": "^7.0.11",
|
||||
"@types/swagger-jsdoc": "^6.0.4",
|
||||
"@types/yauzl": "^2.10.3",
|
||||
"swagger-jsdoc": "^6.2.8",
|
||||
"ts-node-dev": "^2.0.0",
|
||||
"tsconfig-paths": "^4.2.0",
|
||||
"typescript": "^5.8.3"
|
||||
|
||||
735
packages/backend/scripts/generate-openapi-spec.mjs
Normal file
735
packages/backend/scripts/generate-openapi-spec.mjs
Normal file
@@ -0,0 +1,735 @@
|
||||
/**
|
||||
* Generates the OpenAPI specification from swagger-jsdoc annotations in the route files.
|
||||
* Outputs the spec to docs/api/openapi.json for use with vitepress-openapi.
|
||||
*
|
||||
* Run: node packages/backend/scripts/generate-openapi-spec.mjs
|
||||
*/
|
||||
import swaggerJsdoc from 'swagger-jsdoc';
|
||||
import { writeFileSync, mkdirSync } from 'fs';
|
||||
import { resolve, dirname } from 'path';
|
||||
import { fileURLToPath } from 'url';
|
||||
|
||||
const __filename = fileURLToPath(import.meta.url);
|
||||
const __dirname = dirname(__filename);
|
||||
|
||||
const options = {
|
||||
definition: {
|
||||
openapi: '3.1.0',
|
||||
info: {
|
||||
title: 'Open Archiver API',
|
||||
version: '1.0.0',
|
||||
description:
|
||||
'REST API for Open Archiver — an open-source email archiving platform. All authenticated endpoints require a Bearer JWT token obtained from `POST /v1/auth/login`, or an API key passed as a Bearer token.',
|
||||
license: {
|
||||
name: 'SEE LICENSE IN LICENSE',
|
||||
url: 'https://github.com/LogicLabs-OU/OpenArchiver/blob/main/LICENSE',
|
||||
},
|
||||
contact: {
|
||||
name: 'Open Archiver',
|
||||
url: 'https://openarchiver.com',
|
||||
},
|
||||
},
|
||||
servers: [
|
||||
{
|
||||
url: 'http://localhost:3000',
|
||||
description: 'Local development',
|
||||
},
|
||||
],
|
||||
// Both security schemes apply globally; individual endpoints may override
|
||||
security: [{ bearerAuth: [] }, { apiKeyAuth: [] }],
|
||||
components: {
|
||||
securitySchemes: {
|
||||
bearerAuth: {
|
||||
type: 'http',
|
||||
scheme: 'bearer',
|
||||
bearerFormat: 'JWT',
|
||||
description:
|
||||
'JWT obtained from `POST /v1/auth/login`. Pass as `Authorization: Bearer <token>`.',
|
||||
},
|
||||
apiKeyAuth: {
|
||||
type: 'apiKey',
|
||||
in: 'header',
|
||||
name: 'X-API-KEY',
|
||||
description:
|
||||
'API key generated via `POST /v1/api-keys`. Pass as `X-API-KEY: <key>`.',
|
||||
},
|
||||
},
|
||||
responses: {
|
||||
Unauthorized: {
|
||||
description: 'Authentication is required or the token is invalid/expired.',
|
||||
content: {
|
||||
'application/json': {
|
||||
schema: { $ref: '#/components/schemas/ErrorMessage' },
|
||||
example: { message: 'Unauthorized' },
|
||||
},
|
||||
},
|
||||
},
|
||||
Forbidden: {
|
||||
description:
|
||||
'The authenticated user does not have permission to perform this action.',
|
||||
content: {
|
||||
'application/json': {
|
||||
schema: { $ref: '#/components/schemas/ErrorMessage' },
|
||||
example: { message: 'Forbidden' },
|
||||
},
|
||||
},
|
||||
},
|
||||
NotFound: {
|
||||
description: 'The requested resource was not found.',
|
||||
content: {
|
||||
'application/json': {
|
||||
schema: { $ref: '#/components/schemas/ErrorMessage' },
|
||||
example: { message: 'Not found' },
|
||||
},
|
||||
},
|
||||
},
|
||||
InternalServerError: {
|
||||
description: 'An unexpected error occurred on the server.',
|
||||
content: {
|
||||
'application/json': {
|
||||
schema: { $ref: '#/components/schemas/ErrorMessage' },
|
||||
example: { message: 'Internal server error' },
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
schemas: {
|
||||
// --- Shared utility schemas ---
|
||||
ErrorMessage: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
message: {
|
||||
type: 'string',
|
||||
description: 'Human-readable error description.',
|
||||
example: 'An error occurred.',
|
||||
},
|
||||
},
|
||||
required: ['message'],
|
||||
},
|
||||
MessageResponse: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
message: {
|
||||
type: 'string',
|
||||
example: 'Operation completed successfully.',
|
||||
},
|
||||
},
|
||||
required: ['message'],
|
||||
},
|
||||
ValidationError: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
message: {
|
||||
type: 'string',
|
||||
example: 'Request body is invalid.',
|
||||
},
|
||||
errors: {
|
||||
type: 'string',
|
||||
description: 'Zod validation error details.',
|
||||
},
|
||||
},
|
||||
required: ['message'],
|
||||
},
|
||||
// --- Auth ---
|
||||
LoginResponse: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
accessToken: {
|
||||
type: 'string',
|
||||
description: 'JWT for authenticating subsequent requests.',
|
||||
example: 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...',
|
||||
},
|
||||
user: {
|
||||
$ref: '#/components/schemas/User',
|
||||
},
|
||||
},
|
||||
required: ['accessToken', 'user'],
|
||||
},
|
||||
// --- Users ---
|
||||
User: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
id: { type: 'string', example: 'clx1y2z3a0000b4d2' },
|
||||
first_name: { type: 'string', nullable: true, example: 'Jane' },
|
||||
last_name: { type: 'string', nullable: true, example: 'Doe' },
|
||||
email: {
|
||||
type: 'string',
|
||||
format: 'email',
|
||||
example: 'jane.doe@example.com',
|
||||
},
|
||||
role: {
|
||||
$ref: '#/components/schemas/Role',
|
||||
nullable: true,
|
||||
},
|
||||
createdAt: { type: 'string', format: 'date-time' },
|
||||
},
|
||||
required: ['id', 'email', 'createdAt'],
|
||||
},
|
||||
// --- IAM ---
|
||||
Role: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
id: { type: 'string', example: 'clx1y2z3a0000b4d2' },
|
||||
slug: { type: 'string', nullable: true, example: 'predefined_super_admin' },
|
||||
name: { type: 'string', example: 'Super Admin' },
|
||||
policies: {
|
||||
type: 'array',
|
||||
items: { $ref: '#/components/schemas/CaslPolicy' },
|
||||
},
|
||||
createdAt: { type: 'string', format: 'date-time' },
|
||||
updatedAt: { type: 'string', format: 'date-time' },
|
||||
},
|
||||
required: ['id', 'name', 'policies', 'createdAt', 'updatedAt'],
|
||||
},
|
||||
CaslPolicy: {
|
||||
type: 'object',
|
||||
description:
|
||||
'An CASL-style permission policy statement. `action` and `subject` can be strings or arrays of strings. `conditions` optionally restricts access to specific resource attributes.',
|
||||
properties: {
|
||||
action: {
|
||||
oneOf: [
|
||||
{
|
||||
type: 'string',
|
||||
example: 'read',
|
||||
},
|
||||
{
|
||||
type: 'array',
|
||||
items: { type: 'string' },
|
||||
example: ['read', 'search'],
|
||||
},
|
||||
],
|
||||
},
|
||||
subject: {
|
||||
oneOf: [
|
||||
{
|
||||
type: 'string',
|
||||
example: 'archive',
|
||||
},
|
||||
{
|
||||
type: 'array',
|
||||
items: { type: 'string' },
|
||||
example: ['archive', 'ingestion'],
|
||||
},
|
||||
],
|
||||
},
|
||||
conditions: {
|
||||
type: 'object',
|
||||
description:
|
||||
'Optional attribute-level conditions. Supports `${user.id}` interpolation.',
|
||||
example: { userId: '${user.id}' },
|
||||
},
|
||||
},
|
||||
required: ['action', 'subject'],
|
||||
},
|
||||
// --- API Keys ---
|
||||
ApiKey: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
id: { type: 'string', example: 'clx1y2z3a0000b4d2' },
|
||||
name: { type: 'string', example: 'CI/CD Pipeline Key' },
|
||||
key: {
|
||||
type: 'string',
|
||||
description:
|
||||
'Partial/masked key — the raw value is only available at creation time.',
|
||||
example: 'oa_live_abc1...',
|
||||
},
|
||||
expiresAt: { type: 'string', format: 'date-time' },
|
||||
createdAt: { type: 'string', format: 'date-time' },
|
||||
},
|
||||
required: ['id', 'name', 'expiresAt', 'createdAt'],
|
||||
},
|
||||
// --- Ingestion ---
|
||||
SafeIngestionSource: {
|
||||
type: 'object',
|
||||
description: 'An ingestion source with sensitive credential fields removed.',
|
||||
properties: {
|
||||
id: { type: 'string', example: 'clx1y2z3a0000b4d2' },
|
||||
name: { type: 'string', example: 'Company Google Workspace' },
|
||||
provider: {
|
||||
type: 'string',
|
||||
enum: [
|
||||
'google_workspace',
|
||||
'microsoft_365',
|
||||
'generic_imap',
|
||||
'pst_import',
|
||||
'eml_import',
|
||||
'mbox_import',
|
||||
],
|
||||
example: 'google_workspace',
|
||||
},
|
||||
status: {
|
||||
type: 'string',
|
||||
enum: [
|
||||
'active',
|
||||
'paused',
|
||||
'error',
|
||||
'pending_auth',
|
||||
'syncing',
|
||||
'importing',
|
||||
'auth_success',
|
||||
'imported',
|
||||
],
|
||||
example: 'active',
|
||||
},
|
||||
createdAt: { type: 'string', format: 'date-time' },
|
||||
updatedAt: { type: 'string', format: 'date-time' },
|
||||
lastSyncStartedAt: { type: 'string', format: 'date-time', nullable: true },
|
||||
lastSyncFinishedAt: { type: 'string', format: 'date-time', nullable: true },
|
||||
lastSyncStatusMessage: { type: 'string', nullable: true },
|
||||
},
|
||||
required: ['id', 'name', 'provider', 'status', 'createdAt', 'updatedAt'],
|
||||
},
|
||||
CreateIngestionSourceDto: {
|
||||
type: 'object',
|
||||
required: ['name', 'provider', 'providerConfig'],
|
||||
properties: {
|
||||
name: {
|
||||
type: 'string',
|
||||
example: 'Company Google Workspace',
|
||||
},
|
||||
provider: {
|
||||
type: 'string',
|
||||
enum: [
|
||||
'google_workspace',
|
||||
'microsoft_365',
|
||||
'generic_imap',
|
||||
'pst_import',
|
||||
'eml_import',
|
||||
'mbox_import',
|
||||
],
|
||||
},
|
||||
providerConfig: {
|
||||
type: 'object',
|
||||
description:
|
||||
'Provider-specific configuration. See the ingestion source guides for the required fields per provider.',
|
||||
example: {
|
||||
serviceAccountKeyJson: '{"type":"service_account",...}',
|
||||
impersonatedAdminEmail: 'admin@example.com',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
UpdateIngestionSourceDto: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
name: { type: 'string' },
|
||||
provider: {
|
||||
type: 'string',
|
||||
enum: [
|
||||
'google_workspace',
|
||||
'microsoft_365',
|
||||
'generic_imap',
|
||||
'pst_import',
|
||||
'eml_import',
|
||||
'mbox_import',
|
||||
],
|
||||
},
|
||||
status: {
|
||||
type: 'string',
|
||||
enum: [
|
||||
'active',
|
||||
'paused',
|
||||
'error',
|
||||
'pending_auth',
|
||||
'syncing',
|
||||
'importing',
|
||||
'auth_success',
|
||||
'imported',
|
||||
],
|
||||
},
|
||||
providerConfig: { type: 'object' },
|
||||
},
|
||||
},
|
||||
// --- Archived Emails ---
|
||||
Recipient: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
name: { type: 'string', nullable: true, example: 'John Doe' },
|
||||
email: {
|
||||
type: 'string',
|
||||
format: 'email',
|
||||
example: 'john.doe@example.com',
|
||||
},
|
||||
},
|
||||
required: ['email'],
|
||||
},
|
||||
Attachment: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
id: { type: 'string', example: 'clx1y2z3a0000b4d2' },
|
||||
filename: { type: 'string', example: 'invoice.pdf' },
|
||||
mimeType: { type: 'string', nullable: true, example: 'application/pdf' },
|
||||
sizeBytes: { type: 'integer', example: 204800 },
|
||||
storagePath: {
|
||||
type: 'string',
|
||||
example: 'open-archiver/attachments/abc123.pdf',
|
||||
},
|
||||
},
|
||||
required: ['id', 'filename', 'sizeBytes', 'storagePath'],
|
||||
},
|
||||
// Minimal representation of an email within a thread (returned alongside ArchivedEmail)
|
||||
ThreadEmail: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
id: {
|
||||
type: 'string',
|
||||
description: 'ArchivedEmail ID.',
|
||||
example: 'clx1y2z3a0000b4d2',
|
||||
},
|
||||
subject: { type: 'string', nullable: true, example: 'Re: Q4 Invoice' },
|
||||
sentAt: { type: 'string', format: 'date-time' },
|
||||
senderEmail: {
|
||||
type: 'string',
|
||||
format: 'email',
|
||||
example: 'finance@vendor.com',
|
||||
},
|
||||
},
|
||||
required: ['id', 'sentAt', 'senderEmail'],
|
||||
},
|
||||
ArchivedEmail: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
id: { type: 'string', example: 'clx1y2z3a0000b4d2' },
|
||||
ingestionSourceId: { type: 'string', example: 'clx1y2z3a0000b4d2' },
|
||||
userEmail: {
|
||||
type: 'string',
|
||||
format: 'email',
|
||||
example: 'user@company.com',
|
||||
},
|
||||
messageIdHeader: { type: 'string', nullable: true },
|
||||
sentAt: { type: 'string', format: 'date-time' },
|
||||
subject: { type: 'string', nullable: true, example: 'Q4 Invoice' },
|
||||
senderName: { type: 'string', nullable: true, example: 'Finance Dept' },
|
||||
senderEmail: {
|
||||
type: 'string',
|
||||
format: 'email',
|
||||
example: 'finance@vendor.com',
|
||||
},
|
||||
recipients: {
|
||||
type: 'array',
|
||||
items: { $ref: '#/components/schemas/Recipient' },
|
||||
},
|
||||
storagePath: { type: 'string' },
|
||||
storageHashSha256: {
|
||||
type: 'string',
|
||||
description:
|
||||
'SHA-256 hash of the raw email file, stored at archival time.',
|
||||
},
|
||||
sizeBytes: { type: 'integer' },
|
||||
isIndexed: { type: 'boolean' },
|
||||
hasAttachments: { type: 'boolean' },
|
||||
isOnLegalHold: { type: 'boolean' },
|
||||
archivedAt: { type: 'string', format: 'date-time' },
|
||||
attachments: {
|
||||
type: 'array',
|
||||
items: { $ref: '#/components/schemas/Attachment' },
|
||||
},
|
||||
thread: {
|
||||
type: 'array',
|
||||
description:
|
||||
'Other emails in the same thread, ordered by sentAt. Only present on single-email GET responses.',
|
||||
items: { $ref: '#/components/schemas/ThreadEmail' },
|
||||
},
|
||||
path: { type: 'string', nullable: true },
|
||||
tags: {
|
||||
type: 'array',
|
||||
items: { type: 'string' },
|
||||
nullable: true,
|
||||
},
|
||||
},
|
||||
required: [
|
||||
'id',
|
||||
'ingestionSourceId',
|
||||
'userEmail',
|
||||
'sentAt',
|
||||
'senderEmail',
|
||||
'recipients',
|
||||
'storagePath',
|
||||
'storageHashSha256',
|
||||
'sizeBytes',
|
||||
'isIndexed',
|
||||
'hasAttachments',
|
||||
'isOnLegalHold',
|
||||
'archivedAt',
|
||||
],
|
||||
},
|
||||
PaginatedArchivedEmails: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
items: {
|
||||
type: 'array',
|
||||
items: { $ref: '#/components/schemas/ArchivedEmail' },
|
||||
},
|
||||
total: { type: 'integer', example: 1234 },
|
||||
page: { type: 'integer', example: 1 },
|
||||
limit: { type: 'integer', example: 10 },
|
||||
},
|
||||
required: ['items', 'total', 'page', 'limit'],
|
||||
},
|
||||
// --- Search ---
|
||||
SearchResults: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
hits: {
|
||||
type: 'array',
|
||||
description:
|
||||
'Array of matching archived email objects, potentially with highlighted fields.',
|
||||
items: { type: 'object' },
|
||||
},
|
||||
total: { type: 'integer', example: 42 },
|
||||
page: { type: 'integer', example: 1 },
|
||||
limit: { type: 'integer', example: 10 },
|
||||
totalPages: { type: 'integer', example: 5 },
|
||||
processingTimeMs: {
|
||||
type: 'integer',
|
||||
description: 'Meilisearch query processing time in milliseconds.',
|
||||
example: 12,
|
||||
},
|
||||
},
|
||||
required: ['hits', 'total', 'page', 'limit', 'totalPages', 'processingTimeMs'],
|
||||
},
|
||||
// --- Integrity ---
|
||||
IntegrityCheckResult: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
type: {
|
||||
type: 'string',
|
||||
enum: ['email', 'attachment'],
|
||||
description:
|
||||
'Whether this result is for the email itself or one of its attachments.',
|
||||
},
|
||||
id: { type: 'string', example: 'clx1y2z3a0000b4d2' },
|
||||
filename: {
|
||||
type: 'string',
|
||||
description:
|
||||
'Attachment filename. Only present when `type` is `attachment`.',
|
||||
example: 'invoice.pdf',
|
||||
},
|
||||
isValid: {
|
||||
type: 'boolean',
|
||||
description: 'True if the stored and computed hashes match.',
|
||||
},
|
||||
reason: {
|
||||
type: 'string',
|
||||
description: 'Human-readable explanation if `isValid` is false.',
|
||||
},
|
||||
storedHash: {
|
||||
type: 'string',
|
||||
description: 'SHA-256 hash stored at archival time.',
|
||||
example: 'a3f1b2c4...',
|
||||
},
|
||||
computedHash: {
|
||||
type: 'string',
|
||||
description: 'SHA-256 hash computed during this verification run.',
|
||||
example: 'a3f1b2c4...',
|
||||
},
|
||||
},
|
||||
required: ['type', 'id', 'isValid', 'storedHash', 'computedHash'],
|
||||
},
|
||||
// --- Jobs ---
|
||||
QueueCounts: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
active: { type: 'integer', example: 0 },
|
||||
completed: { type: 'integer', example: 56 },
|
||||
failed: { type: 'integer', example: 4 },
|
||||
delayed: { type: 'integer', example: 0 },
|
||||
waiting: { type: 'integer', example: 0 },
|
||||
paused: { type: 'integer', example: 0 },
|
||||
},
|
||||
required: ['active', 'completed', 'failed', 'delayed', 'waiting', 'paused'],
|
||||
},
|
||||
QueueOverview: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
name: { type: 'string', example: 'ingestion' },
|
||||
counts: { $ref: '#/components/schemas/QueueCounts' },
|
||||
},
|
||||
required: ['name', 'counts'],
|
||||
},
|
||||
Job: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
id: { type: 'string', nullable: true, example: '1' },
|
||||
name: { type: 'string', example: 'initial-import' },
|
||||
data: {
|
||||
type: 'object',
|
||||
description: 'Job payload data.',
|
||||
example: { ingestionSourceId: 'clx1y2z3a0000b4d2' },
|
||||
},
|
||||
state: {
|
||||
type: 'string',
|
||||
enum: ['active', 'completed', 'failed', 'delayed', 'waiting', 'paused'],
|
||||
example: 'failed',
|
||||
},
|
||||
failedReason: {
|
||||
type: 'string',
|
||||
nullable: true,
|
||||
example: 'Error: Connection timed out',
|
||||
},
|
||||
timestamp: { type: 'integer', example: 1678886400000 },
|
||||
processedOn: { type: 'integer', nullable: true, example: 1678886401000 },
|
||||
finishedOn: { type: 'integer', nullable: true, example: 1678886402000 },
|
||||
attemptsMade: { type: 'integer', example: 5 },
|
||||
stacktrace: {
|
||||
type: 'array',
|
||||
items: { type: 'string' },
|
||||
},
|
||||
returnValue: { nullable: true },
|
||||
ingestionSourceId: { type: 'string', nullable: true },
|
||||
error: {
|
||||
description: 'Shorthand copy of `failedReason` for easier access.',
|
||||
nullable: true,
|
||||
},
|
||||
},
|
||||
required: [
|
||||
'id',
|
||||
'name',
|
||||
'data',
|
||||
'state',
|
||||
'timestamp',
|
||||
'attemptsMade',
|
||||
'stacktrace',
|
||||
],
|
||||
},
|
||||
QueueDetails: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
name: { type: 'string', example: 'ingestion' },
|
||||
counts: { $ref: '#/components/schemas/QueueCounts' },
|
||||
jobs: {
|
||||
type: 'array',
|
||||
items: { $ref: '#/components/schemas/Job' },
|
||||
},
|
||||
pagination: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
currentPage: { type: 'integer', example: 1 },
|
||||
totalPages: { type: 'integer', example: 3 },
|
||||
totalJobs: { type: 'integer', example: 25 },
|
||||
limit: { type: 'integer', example: 10 },
|
||||
},
|
||||
required: ['currentPage', 'totalPages', 'totalJobs', 'limit'],
|
||||
},
|
||||
},
|
||||
required: ['name', 'counts', 'jobs', 'pagination'],
|
||||
},
|
||||
// --- Dashboard ---
|
||||
DashboardStats: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
totalEmailsArchived: { type: 'integer', example: 125000 },
|
||||
totalStorageUsed: {
|
||||
type: 'integer',
|
||||
description: 'Total storage used by all archived emails in bytes.',
|
||||
example: 5368709120,
|
||||
},
|
||||
failedIngestionsLast7Days: {
|
||||
type: 'integer',
|
||||
description:
|
||||
'Number of ingestion sources in error state updated in the last 7 days.',
|
||||
example: 2,
|
||||
},
|
||||
},
|
||||
},
|
||||
IngestionSourceStats: {
|
||||
type: 'object',
|
||||
description: 'Summary of an ingestion source including its storage usage.',
|
||||
properties: {
|
||||
id: { type: 'string', example: 'clx1y2z3a0000b4d2' },
|
||||
name: { type: 'string', example: 'Company Google Workspace' },
|
||||
provider: { type: 'string', example: 'google_workspace' },
|
||||
status: { type: 'string', example: 'active' },
|
||||
storageUsed: {
|
||||
type: 'integer',
|
||||
description:
|
||||
'Total bytes stored for emails from this ingestion source.',
|
||||
example: 1073741824,
|
||||
},
|
||||
},
|
||||
required: ['id', 'name', 'provider', 'status', 'storageUsed'],
|
||||
},
|
||||
RecentSync: {
|
||||
type: 'object',
|
||||
description: 'Summary of a recent sync session.',
|
||||
properties: {
|
||||
id: { type: 'string', example: 'clx1y2z3a0000b4d2' },
|
||||
sourceName: { type: 'string', example: 'Company Google Workspace' },
|
||||
startTime: { type: 'string', format: 'date-time' },
|
||||
duration: {
|
||||
type: 'integer',
|
||||
description: 'Duration in milliseconds.',
|
||||
example: 4500,
|
||||
},
|
||||
emailsProcessed: { type: 'integer', example: 120 },
|
||||
status: { type: 'string', example: 'completed' },
|
||||
},
|
||||
required: [
|
||||
'id',
|
||||
'sourceName',
|
||||
'startTime',
|
||||
'duration',
|
||||
'emailsProcessed',
|
||||
'status',
|
||||
],
|
||||
},
|
||||
IndexedInsights: {
|
||||
type: 'object',
|
||||
description: 'Insights derived from the search index.',
|
||||
properties: {
|
||||
topSenders: {
|
||||
type: 'array',
|
||||
items: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
sender: { type: 'string', example: 'finance@vendor.com' },
|
||||
count: { type: 'integer', example: 342 },
|
||||
},
|
||||
required: ['sender', 'count'],
|
||||
},
|
||||
},
|
||||
},
|
||||
required: ['topSenders'],
|
||||
},
|
||||
// --- Settings ---
|
||||
SystemSettings: {
|
||||
type: 'object',
|
||||
description: 'Non-sensitive system configuration values.',
|
||||
properties: {
|
||||
language: {
|
||||
type: 'string',
|
||||
enum: ['en', 'es', 'fr', 'de', 'it', 'pt', 'nl', 'ja', 'et', 'el'],
|
||||
example: 'en',
|
||||
description: 'Default UI language code.',
|
||||
},
|
||||
theme: {
|
||||
type: 'string',
|
||||
enum: ['light', 'dark', 'system'],
|
||||
example: 'system',
|
||||
description: 'Default color theme.',
|
||||
},
|
||||
supportEmail: {
|
||||
type: 'string',
|
||||
format: 'email',
|
||||
nullable: true,
|
||||
example: 'support@example.com',
|
||||
description: 'Public-facing support email address.',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
// Scan all route files for @openapi annotations
|
||||
apis: [resolve(__dirname, '../src/api/routes/*.ts')],
|
||||
};
|
||||
|
||||
const spec = swaggerJsdoc(options);
|
||||
|
||||
// Output to docs/ directory so VitePress can consume it
|
||||
const outputPath = resolve(__dirname, '../../../docs/api/openapi.json');
|
||||
mkdirSync(dirname(outputPath), { recursive: true });
|
||||
writeFileSync(outputPath, JSON.stringify(spec, null, 2));
|
||||
|
||||
console.log(`✅ OpenAPI spec generated: ${outputPath}`);
|
||||
console.log(` Paths: ${Object.keys(spec.paths ?? {}).length}, Tags: ${(spec.tags ?? []).length}`);
|
||||
@@ -2,6 +2,7 @@ import { Request, Response } from 'express';
|
||||
import { ApiKeyService } from '../../services/ApiKeyService';
|
||||
import { z } from 'zod';
|
||||
import { UserService } from '../../services/UserService';
|
||||
import { config } from '../../config';
|
||||
|
||||
const generateApiKeySchema = z.object({
|
||||
name: z
|
||||
@@ -18,6 +19,9 @@ export class ApiKeyController {
|
||||
private userService = new UserService();
|
||||
public generateApiKey = async (req: Request, res: Response) => {
|
||||
try {
|
||||
if (config.app.isDemo) {
|
||||
return res.status(403).json({ message: req.t('errors.demoMode') });
|
||||
}
|
||||
const { name, expiresInDays } = generateApiKeySchema.parse(req.body);
|
||||
if (!req.user || !req.user.sub) {
|
||||
return res.status(401).json({ message: 'Unauthorized' });
|
||||
@@ -58,6 +62,9 @@ export class ApiKeyController {
|
||||
};
|
||||
|
||||
public deleteApiKey = async (req: Request, res: Response) => {
|
||||
if (config.app.isDemo) {
|
||||
return res.status(403).json({ message: req.t('errors.demoMode') });
|
||||
}
|
||||
const { id } = req.params;
|
||||
if (!req.user || !req.user.sub) {
|
||||
return res.status(401).json({ message: 'Unauthorized' });
|
||||
|
||||
@@ -59,17 +59,26 @@ export class ArchivedEmailController {
|
||||
};
|
||||
|
||||
public deleteArchivedEmail = async (req: Request, res: Response): Promise<Response> => {
|
||||
// Guard: return 400 if deletion is disabled in system settings before touching anything else
|
||||
try {
|
||||
checkDeletionEnabled();
|
||||
const { id } = req.params;
|
||||
const userId = req.user?.sub;
|
||||
if (!userId) {
|
||||
return res.status(401).json({ message: req.t('errors.unauthorized') });
|
||||
}
|
||||
const actor = await this.userService.findById(userId);
|
||||
if (!actor) {
|
||||
return res.status(401).json({ message: req.t('errors.unauthorized') });
|
||||
}
|
||||
} catch (error) {
|
||||
return res.status(400).json({
|
||||
message: error instanceof Error ? error.message : req.t('errors.deletionDisabled'),
|
||||
});
|
||||
}
|
||||
|
||||
const { id } = req.params;
|
||||
const userId = req.user?.sub;
|
||||
if (!userId) {
|
||||
return res.status(401).json({ message: req.t('errors.unauthorized') });
|
||||
}
|
||||
const actor = await this.userService.findById(userId);
|
||||
if (!actor) {
|
||||
return res.status(401).json({ message: req.t('errors.unauthorized') });
|
||||
}
|
||||
|
||||
try {
|
||||
await ArchivedEmailService.deleteArchivedEmail(id, actor, req.ip || 'unknown');
|
||||
return res.status(204).send();
|
||||
} catch (error) {
|
||||
@@ -78,6 +87,10 @@ export class ArchivedEmailController {
|
||||
if (error.message === 'Archived email not found') {
|
||||
return res.status(404).json({ message: req.t('archivedEmail.notFound') });
|
||||
}
|
||||
// Retention policy / legal hold blocks are user-facing 400 errors
|
||||
if (error.message.startsWith('Deletion blocked by retention policy')) {
|
||||
return res.status(400).json({ message: error.message });
|
||||
}
|
||||
return res.status(500).json({ message: error.message });
|
||||
}
|
||||
return res.status(500).json({ message: req.t('errors.internalServerError') });
|
||||
|
||||
@@ -177,6 +177,31 @@ export class IngestionController {
|
||||
}
|
||||
};
|
||||
|
||||
public unmerge = async (req: Request, res: Response): Promise<Response> => {
|
||||
try {
|
||||
const { id } = req.params;
|
||||
const userId = req.user?.sub;
|
||||
if (!userId) {
|
||||
return res.status(401).json({ message: req.t('errors.unauthorized') });
|
||||
}
|
||||
const actor = await this.userService.findById(userId);
|
||||
if (!actor) {
|
||||
return res.status(401).json({ message: req.t('errors.unauthorized') });
|
||||
}
|
||||
const updatedSource = await IngestionService.unmerge(id, actor, req.ip || 'unknown');
|
||||
const safeSource = this.toSafeIngestionSource(updatedSource);
|
||||
return res.status(200).json(safeSource);
|
||||
} catch (error) {
|
||||
logger.error({ err: error }, `Unmerge ingestion source ${req.params.id} error`);
|
||||
if (error instanceof Error && error.message === 'Ingestion source not found') {
|
||||
return res.status(404).json({ message: req.t('ingestion.notFound') });
|
||||
} else if (error instanceof Error) {
|
||||
return res.status(400).json({ message: error.message });
|
||||
}
|
||||
return res.status(500).json({ message: req.t('errors.internalServerError') });
|
||||
}
|
||||
};
|
||||
|
||||
public triggerForceSync = async (req: Request, res: Response): Promise<Response> => {
|
||||
try {
|
||||
const { id } = req.params;
|
||||
|
||||
@@ -3,24 +3,96 @@ import { StorageService } from '../../services/StorageService';
|
||||
import { randomUUID } from 'crypto';
|
||||
import busboy from 'busboy';
|
||||
import { config } from '../../config/index';
|
||||
import { logger } from '../../config/logger';
|
||||
import i18next from 'i18next';
|
||||
|
||||
export const uploadFile = async (req: Request, res: Response) => {
|
||||
const storage = new StorageService();
|
||||
const bb = busboy({ headers: req.headers });
|
||||
const uploads: Promise<void>[] = [];
|
||||
let filePath = '';
|
||||
let originalFilename = '';
|
||||
let headersSent = false;
|
||||
const contentLength = req.headers['content-length'];
|
||||
|
||||
bb.on('file', (fieldname, file, filename) => {
|
||||
originalFilename = filename.filename;
|
||||
logger.info({ contentLength, contentType: req.headers['content-type'] }, 'File upload started');
|
||||
|
||||
const sendErrorResponse = (statusCode: number, message: string) => {
|
||||
if (!headersSent) {
|
||||
headersSent = true;
|
||||
res.status(statusCode).json({
|
||||
status: 'error',
|
||||
statusCode,
|
||||
message,
|
||||
errors: null,
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
let bb: busboy.Busboy;
|
||||
try {
|
||||
bb = busboy({ headers: req.headers });
|
||||
} catch (err) {
|
||||
const message = err instanceof Error ? err.message : i18next.t('upload.invalid_request');
|
||||
logger.error({ error: message }, 'Failed to initialize file upload parser');
|
||||
sendErrorResponse(400, i18next.t('upload.invalid_request'));
|
||||
return;
|
||||
}
|
||||
|
||||
bb.on('file', (fieldname, file, info) => {
|
||||
originalFilename = info.filename;
|
||||
const uuid = randomUUID();
|
||||
filePath = `${config.storage.openArchiverFolderName}/tmp/${uuid}-${originalFilename}`;
|
||||
|
||||
logger.info({ filename: originalFilename, fieldname }, 'Receiving file stream');
|
||||
|
||||
file.on('error', (err) => {
|
||||
logger.error(
|
||||
{ error: err.message, filename: originalFilename },
|
||||
'File stream error during upload'
|
||||
);
|
||||
sendErrorResponse(500, i18next.t('upload.stream_error'));
|
||||
});
|
||||
|
||||
uploads.push(storage.put(filePath, file));
|
||||
});
|
||||
|
||||
bb.on('error', (err: Error) => {
|
||||
logger.error({ error: err.message }, 'Upload parsing error');
|
||||
sendErrorResponse(500, i18next.t('upload.parse_error'));
|
||||
});
|
||||
|
||||
bb.on('finish', async () => {
|
||||
await Promise.all(uploads);
|
||||
res.json({ filePath });
|
||||
try {
|
||||
await Promise.all(uploads);
|
||||
if (!headersSent) {
|
||||
headersSent = true;
|
||||
logger.info(
|
||||
{ filePath, filename: originalFilename },
|
||||
'File upload completed successfully'
|
||||
);
|
||||
res.json({ filePath });
|
||||
}
|
||||
} catch (err) {
|
||||
const message = err instanceof Error ? err.message : 'Unknown storage error';
|
||||
logger.error(
|
||||
{ error: message, filename: originalFilename, filePath },
|
||||
'Failed to write uploaded file to storage'
|
||||
);
|
||||
sendErrorResponse(500, i18next.t('upload.storage_error'));
|
||||
}
|
||||
});
|
||||
|
||||
// Handle client disconnection mid-upload
|
||||
req.on('error', (err) => {
|
||||
logger.warn(
|
||||
{ error: err.message, filename: originalFilename },
|
||||
'Client connection error during upload'
|
||||
);
|
||||
sendErrorResponse(499, i18next.t('upload.connection_error'));
|
||||
});
|
||||
|
||||
req.on('aborted', () => {
|
||||
logger.warn({ filename: originalFilename }, 'Client aborted upload');
|
||||
});
|
||||
|
||||
req.pipe(bb);
|
||||
|
||||
@@ -3,6 +3,7 @@ import { UserService } from '../../services/UserService';
|
||||
import * as schema from '../../database/schema';
|
||||
import { sql } from 'drizzle-orm';
|
||||
import { db } from '../../database';
|
||||
import { config } from '../../config';
|
||||
|
||||
const userService = new UserService();
|
||||
|
||||
@@ -92,6 +93,9 @@ export const getProfile = async (req: Request, res: Response) => {
|
||||
};
|
||||
|
||||
export const updateProfile = async (req: Request, res: Response) => {
|
||||
if (config.app.isDemo) {
|
||||
return res.status(403).json({ message: req.t('errors.demoMode') });
|
||||
}
|
||||
const { email, first_name, last_name } = req.body;
|
||||
if (!req.user || !req.user.sub) {
|
||||
return res.status(401).json({ message: 'Unauthorized' });
|
||||
@@ -111,6 +115,9 @@ export const updateProfile = async (req: Request, res: Response) => {
|
||||
};
|
||||
|
||||
export const updatePassword = async (req: Request, res: Response) => {
|
||||
if (config.app.isDemo) {
|
||||
return res.status(403).json({ message: req.t('errors.demoMode') });
|
||||
}
|
||||
const { currentPassword, newPassword } = req.body;
|
||||
if (!req.user || !req.user.sub) {
|
||||
return res.status(401).json({ message: 'Unauthorized' });
|
||||
|
||||
@@ -7,8 +7,127 @@ export const apiKeyRoutes = (authService: AuthService): Router => {
|
||||
const router = Router();
|
||||
const controller = new ApiKeyController();
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/api-keys:
|
||||
* post:
|
||||
* summary: Generate an API key
|
||||
* description: >
|
||||
* Generates a new API key for the authenticated user. The raw key value is only returned once at creation time.
|
||||
* The key name must be between 1–255 characters. Expiry is required and must be within 730 days (2 years).
|
||||
* Disabled in demo mode.
|
||||
* operationId: generateApiKey
|
||||
* tags:
|
||||
* - API Keys
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* requestBody:
|
||||
* required: true
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: object
|
||||
* required:
|
||||
* - name
|
||||
* - expiresInDays
|
||||
* properties:
|
||||
* name:
|
||||
* type: string
|
||||
* minLength: 1
|
||||
* maxLength: 255
|
||||
* example: "CI/CD Pipeline Key"
|
||||
* expiresInDays:
|
||||
* type: integer
|
||||
* minimum: 1
|
||||
* maximum: 730
|
||||
* example: 90
|
||||
* responses:
|
||||
* '201':
|
||||
* description: API key created. The raw `key` value is only shown once.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: object
|
||||
* properties:
|
||||
* key:
|
||||
* type: string
|
||||
* description: The raw API key. Store this securely — it will not be shown again.
|
||||
* example: "oa_live_abc123..."
|
||||
* '400':
|
||||
* description: Validation error (name too short/long, expiry out of range).
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/ValidationError'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '403':
|
||||
* description: Disabled in demo mode.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/ErrorMessage'
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
* get:
|
||||
* summary: List API keys
|
||||
* description: Returns all API keys belonging to the currently authenticated user. The raw key value is not included.
|
||||
* operationId: getApiKeys
|
||||
* tags:
|
||||
* - API Keys
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* responses:
|
||||
* '200':
|
||||
* description: List of API keys (without raw key values).
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: array
|
||||
* items:
|
||||
* $ref: '#/components/schemas/ApiKey'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
*/
|
||||
router.post('/', requireAuth(authService), controller.generateApiKey);
|
||||
router.get('/', requireAuth(authService), controller.getApiKeys);
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/api-keys/{id}:
|
||||
* delete:
|
||||
* summary: Delete an API key
|
||||
* description: Permanently revokes and deletes an API key by ID. Only the owning user can delete their own keys. Disabled in demo mode.
|
||||
* operationId: deleteApiKey
|
||||
* tags:
|
||||
* - API Keys
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* parameters:
|
||||
* - name: id
|
||||
* in: path
|
||||
* required: true
|
||||
* description: The ID of the API key to delete.
|
||||
* schema:
|
||||
* type: string
|
||||
* example: "clx1y2z3a0000b4d2"
|
||||
* responses:
|
||||
* '204':
|
||||
* description: API key deleted. No content returned.
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '403':
|
||||
* description: Disabled in demo mode.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/ErrorMessage'
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
*/
|
||||
router.delete('/:id', requireAuth(authService), controller.deleteApiKey);
|
||||
|
||||
return router;
|
||||
|
||||
@@ -13,12 +13,126 @@ export const createArchivedEmailRouter = (
|
||||
// Secure all routes in this module
|
||||
router.use(requireAuth(authService));
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/archived-emails/ingestion-source/{ingestionSourceId}:
|
||||
* get:
|
||||
* summary: List archived emails for an ingestion source
|
||||
* description: Returns a paginated list of archived emails belonging to the specified ingestion source. Requires `read:archive` permission.
|
||||
* operationId: getArchivedEmails
|
||||
* tags:
|
||||
* - Archived Emails
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* parameters:
|
||||
* - name: ingestionSourceId
|
||||
* in: path
|
||||
* required: true
|
||||
* description: The ID of the ingestion source to retrieve emails for.
|
||||
* schema:
|
||||
* type: string
|
||||
* example: "clx1y2z3a0000b4d2"
|
||||
* - name: page
|
||||
* in: query
|
||||
* required: false
|
||||
* description: Page number for pagination.
|
||||
* schema:
|
||||
* type: integer
|
||||
* default: 1
|
||||
* example: 1
|
||||
* - name: limit
|
||||
* in: query
|
||||
* required: false
|
||||
* description: Number of items per page.
|
||||
* schema:
|
||||
* type: integer
|
||||
* default: 10
|
||||
* example: 10
|
||||
* responses:
|
||||
* '200':
|
||||
* description: Paginated list of archived emails.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/PaginatedArchivedEmails'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
*/
|
||||
router.get(
|
||||
'/ingestion-source/:ingestionSourceId',
|
||||
requirePermission('read', 'archive'),
|
||||
archivedEmailController.getArchivedEmails
|
||||
);
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/archived-emails/{id}:
|
||||
* get:
|
||||
* summary: Get a single archived email
|
||||
* description: Retrieves the full details of a single archived email by ID, including attachments and thread. Requires `read:archive` permission.
|
||||
* operationId: getArchivedEmailById
|
||||
* tags:
|
||||
* - Archived Emails
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* parameters:
|
||||
* - name: id
|
||||
* in: path
|
||||
* required: true
|
||||
* description: The ID of the archived email.
|
||||
* schema:
|
||||
* type: string
|
||||
* example: "clx1y2z3a0000b4d2"
|
||||
* responses:
|
||||
* '200':
|
||||
* description: Archived email details.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/ArchivedEmail'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '404':
|
||||
* $ref: '#/components/responses/NotFound'
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
* delete:
|
||||
* summary: Delete an archived email
|
||||
* description: Permanently deletes an archived email by ID. Deletion must be enabled in system settings and the email must not be on legal hold. Requires `delete:archive` permission.
|
||||
* operationId: deleteArchivedEmail
|
||||
* tags:
|
||||
* - Archived Emails
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* parameters:
|
||||
* - name: id
|
||||
* in: path
|
||||
* required: true
|
||||
* description: The ID of the archived email to delete.
|
||||
* schema:
|
||||
* type: string
|
||||
* example: "clx1y2z3a0000b4d2"
|
||||
* responses:
|
||||
* '204':
|
||||
* description: Email deleted successfully. No content returned.
|
||||
* '400':
|
||||
* description: Deletion is disabled in system settings, or the email is blocked by a retention policy / legal hold.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/ErrorMessage'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '404':
|
||||
* $ref: '#/components/responses/NotFound'
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
*/
|
||||
router.get(
|
||||
'/:id',
|
||||
requirePermission('read', 'archive'),
|
||||
|
||||
@@ -5,23 +5,141 @@ export const createAuthRouter = (authController: AuthController): Router => {
|
||||
const router = Router();
|
||||
|
||||
/**
|
||||
* @route POST /api/v1/auth/setup
|
||||
* @description Creates the initial administrator user.
|
||||
* @access Public
|
||||
* @openapi
|
||||
* /v1/auth/setup:
|
||||
* post:
|
||||
* summary: Initial setup
|
||||
* description: Creates the initial administrator user. Can only be called once when no users exist.
|
||||
* operationId: authSetup
|
||||
* tags:
|
||||
* - Auth
|
||||
* security: []
|
||||
* requestBody:
|
||||
* required: true
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: object
|
||||
* required:
|
||||
* - email
|
||||
* - password
|
||||
* - first_name
|
||||
* - last_name
|
||||
* properties:
|
||||
* email:
|
||||
* type: string
|
||||
* format: email
|
||||
* example: admin@example.com
|
||||
* password:
|
||||
* type: string
|
||||
* format: password
|
||||
* example: "securepassword123"
|
||||
* first_name:
|
||||
* type: string
|
||||
* example: Admin
|
||||
* last_name:
|
||||
* type: string
|
||||
* example: User
|
||||
* responses:
|
||||
* '201':
|
||||
* description: Admin user created and logged in successfully.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/LoginResponse'
|
||||
* '400':
|
||||
* description: All fields are required.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/ErrorMessage'
|
||||
* '403':
|
||||
* description: Setup has already been completed (users already exist).
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/ErrorMessage'
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
*/
|
||||
router.post('/setup', authController.setup);
|
||||
|
||||
/**
|
||||
* @route POST /api/v1/auth/login
|
||||
* @description Authenticates a user and returns a JWT.
|
||||
* @access Public
|
||||
* @openapi
|
||||
* /v1/auth/login:
|
||||
* post:
|
||||
* summary: Login
|
||||
* description: Authenticates a user with email and password and returns a JWT access token.
|
||||
* operationId: authLogin
|
||||
* tags:
|
||||
* - Auth
|
||||
* security: []
|
||||
* requestBody:
|
||||
* required: true
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: object
|
||||
* required:
|
||||
* - email
|
||||
* - password
|
||||
* properties:
|
||||
* email:
|
||||
* type: string
|
||||
* format: email
|
||||
* example: user@example.com
|
||||
* password:
|
||||
* type: string
|
||||
* format: password
|
||||
* example: "securepassword123"
|
||||
* responses:
|
||||
* '200':
|
||||
* description: Authentication successful.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/LoginResponse'
|
||||
* '400':
|
||||
* description: Email and password are required.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/ErrorMessage'
|
||||
* '401':
|
||||
* description: Invalid credentials.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/ErrorMessage'
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
*/
|
||||
router.post('/login', authController.login);
|
||||
|
||||
/**
|
||||
* @route GET /api/v1/auth/status
|
||||
* @description Checks if the application has been set up.
|
||||
* @access Public
|
||||
* @openapi
|
||||
* /v1/auth/status:
|
||||
* get:
|
||||
* summary: Check setup status
|
||||
* description: Returns whether the application has been set up (i.e., whether an admin user exists).
|
||||
* operationId: authStatus
|
||||
* tags:
|
||||
* - Auth
|
||||
* security: []
|
||||
* responses:
|
||||
* '200':
|
||||
* description: Setup status returned.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: object
|
||||
* properties:
|
||||
* needsSetup:
|
||||
* type: boolean
|
||||
* description: True if no admin user exists and setup is required.
|
||||
* example: false
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
*/
|
||||
router.get('/status', authController.status);
|
||||
|
||||
|
||||
@@ -9,26 +9,168 @@ export const createDashboardRouter = (authService: AuthService): Router => {
|
||||
|
||||
router.use(requireAuth(authService));
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/dashboard/stats:
|
||||
* get:
|
||||
* summary: Get dashboard stats
|
||||
* description: Returns high-level statistics including total archived emails, total storage used, and failed ingestions in the last 7 days. Requires `read:dashboard` permission.
|
||||
* operationId: getDashboardStats
|
||||
* tags:
|
||||
* - Dashboard
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* responses:
|
||||
* '200':
|
||||
* description: Dashboard statistics.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/DashboardStats'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '403':
|
||||
* $ref: '#/components/responses/Forbidden'
|
||||
*/
|
||||
router.get(
|
||||
'/stats',
|
||||
requirePermission('read', 'dashboard', 'dashboard.permissionRequired'),
|
||||
dashboardController.getStats
|
||||
);
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/dashboard/ingestion-history:
|
||||
* get:
|
||||
* summary: Get ingestion history
|
||||
* description: Returns time-series data of email ingestion counts for the last 30 days. Requires `read:dashboard` permission.
|
||||
* operationId: getIngestionHistory
|
||||
* tags:
|
||||
* - Dashboard
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* responses:
|
||||
* '200':
|
||||
* description: Ingestion history wrapped in a `history` array.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: object
|
||||
* properties:
|
||||
* history:
|
||||
* type: array
|
||||
* items:
|
||||
* type: object
|
||||
* properties:
|
||||
* date:
|
||||
* type: string
|
||||
* format: date-time
|
||||
* description: Truncated to day precision (UTC).
|
||||
* count:
|
||||
* type: integer
|
||||
* required:
|
||||
* - history
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '403':
|
||||
* $ref: '#/components/responses/Forbidden'
|
||||
*/
|
||||
router.get(
|
||||
'/ingestion-history',
|
||||
requirePermission('read', 'dashboard', 'dashboard.permissionRequired'),
|
||||
dashboardController.getIngestionHistory
|
||||
);
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/dashboard/ingestion-sources:
|
||||
* get:
|
||||
* summary: Get ingestion source summaries
|
||||
* description: Returns a summary list of ingestion sources with their storage usage. Requires `read:dashboard` permission.
|
||||
* operationId: getDashboardIngestionSources
|
||||
* tags:
|
||||
* - Dashboard
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* responses:
|
||||
* '200':
|
||||
* description: List of ingestion source summaries.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: array
|
||||
* items:
|
||||
* $ref: '#/components/schemas/IngestionSourceStats'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '403':
|
||||
* $ref: '#/components/responses/Forbidden'
|
||||
*/
|
||||
router.get(
|
||||
'/ingestion-sources',
|
||||
requirePermission('read', 'dashboard', 'dashboard.permissionRequired'),
|
||||
dashboardController.getIngestionSources
|
||||
);
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/dashboard/recent-syncs:
|
||||
* get:
|
||||
* summary: Get recent sync activity
|
||||
* description: Returns the most recent sync sessions across all ingestion sources. Requires `read:dashboard` permission.
|
||||
* operationId: getRecentSyncs
|
||||
* tags:
|
||||
* - Dashboard
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* responses:
|
||||
* '200':
|
||||
* description: List of recent sync sessions.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: array
|
||||
* items:
|
||||
* $ref: '#/components/schemas/RecentSync'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '403':
|
||||
* $ref: '#/components/responses/Forbidden'
|
||||
*/
|
||||
router.get(
|
||||
'/recent-syncs',
|
||||
requirePermission('read', 'dashboard', 'dashboard.permissionRequired'),
|
||||
dashboardController.getRecentSyncs
|
||||
);
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/dashboard/indexed-insights:
|
||||
* get:
|
||||
* summary: Get indexed email insights
|
||||
* description: Returns top-sender statistics from the search index. Requires `read:dashboard` permission.
|
||||
* operationId: getIndexedInsights
|
||||
* tags:
|
||||
* - Dashboard
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* responses:
|
||||
* '200':
|
||||
* description: Indexed email insights.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/IndexedInsights'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '403':
|
||||
* $ref: '#/components/responses/Forbidden'
|
||||
*/
|
||||
router.get(
|
||||
'/indexed-insights',
|
||||
requirePermission('read', 'dashboard', 'dashboard.permissionRequired'),
|
||||
|
||||
@@ -10,16 +10,116 @@ export const createIamRouter = (iamController: IamController, authService: AuthS
|
||||
router.use(requireAuth(authService));
|
||||
|
||||
/**
|
||||
* @route GET /api/v1/iam/roles
|
||||
* @description Gets all roles.
|
||||
* @access Private
|
||||
* @openapi
|
||||
* /v1/iam/roles:
|
||||
* get:
|
||||
* summary: List all roles
|
||||
* description: Returns all IAM roles. If predefined roles do not yet exist, they are created automatically. Requires `read:roles` permission.
|
||||
* operationId: getRoles
|
||||
* tags:
|
||||
* - IAM
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* responses:
|
||||
* '200':
|
||||
* description: List of roles.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: array
|
||||
* items:
|
||||
* $ref: '#/components/schemas/Role'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
*/
|
||||
router.get('/roles', requirePermission('read', 'roles'), iamController.getRoles);
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/iam/roles/{id}:
|
||||
* get:
|
||||
* summary: Get a role
|
||||
* description: Returns a single IAM role by ID. Requires `read:roles` permission.
|
||||
* operationId: getRoleById
|
||||
* tags:
|
||||
* - IAM
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* parameters:
|
||||
* - name: id
|
||||
* in: path
|
||||
* required: true
|
||||
* schema:
|
||||
* type: string
|
||||
* example: "clx1y2z3a0000b4d2"
|
||||
* responses:
|
||||
* '200':
|
||||
* description: Role details.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/Role'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '404':
|
||||
* $ref: '#/components/responses/NotFound'
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
*/
|
||||
router.get('/roles/:id', requirePermission('read', 'roles'), iamController.getRoleById);
|
||||
|
||||
/**
|
||||
* Only super admin has the ability to modify existing roles or create new roles.
|
||||
* @openapi
|
||||
* /v1/iam/roles:
|
||||
* post:
|
||||
* summary: Create a role
|
||||
* description: Creates a new IAM role with the given name and CASL policies. Requires `manage:all` (Super Admin) permission.
|
||||
* operationId: createRole
|
||||
* tags:
|
||||
* - IAM
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* requestBody:
|
||||
* required: true
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: object
|
||||
* required:
|
||||
* - name
|
||||
* - policies
|
||||
* properties:
|
||||
* name:
|
||||
* type: string
|
||||
* example: "Compliance Officer"
|
||||
* policies:
|
||||
* type: array
|
||||
* items:
|
||||
* $ref: '#/components/schemas/CaslPolicy'
|
||||
* responses:
|
||||
* '201':
|
||||
* description: Role created.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/Role'
|
||||
* '400':
|
||||
* description: Missing fields or invalid policy.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/ErrorMessage'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '403':
|
||||
* $ref: '#/components/responses/Forbidden'
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
*/
|
||||
router.post(
|
||||
'/roles',
|
||||
@@ -27,12 +127,94 @@ export const createIamRouter = (iamController: IamController, authService: AuthS
|
||||
iamController.createRole
|
||||
);
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/iam/roles/{id}:
|
||||
* delete:
|
||||
* summary: Delete a role
|
||||
* description: Permanently deletes an IAM role. Requires `manage:all` (Super Admin) permission.
|
||||
* operationId: deleteRole
|
||||
* tags:
|
||||
* - IAM
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* parameters:
|
||||
* - name: id
|
||||
* in: path
|
||||
* required: true
|
||||
* schema:
|
||||
* type: string
|
||||
* example: "clx1y2z3a0000b4d2"
|
||||
* responses:
|
||||
* '204':
|
||||
* description: Role deleted. No content returned.
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '403':
|
||||
* $ref: '#/components/responses/Forbidden'
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
*/
|
||||
router.delete(
|
||||
'/roles/:id',
|
||||
requirePermission('manage', 'all', 'iam.requiresSuperAdminRole'),
|
||||
iamController.deleteRole
|
||||
);
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/iam/roles/{id}:
|
||||
* put:
|
||||
* summary: Update a role
|
||||
* description: Updates the name or policies of an IAM role. Requires `manage:all` (Super Admin) permission.
|
||||
* operationId: updateRole
|
||||
* tags:
|
||||
* - IAM
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* parameters:
|
||||
* - name: id
|
||||
* in: path
|
||||
* required: true
|
||||
* schema:
|
||||
* type: string
|
||||
* example: "clx1y2z3a0000b4d2"
|
||||
* requestBody:
|
||||
* required: true
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: object
|
||||
* properties:
|
||||
* name:
|
||||
* type: string
|
||||
* example: "Senior Compliance Officer"
|
||||
* policies:
|
||||
* type: array
|
||||
* items:
|
||||
* $ref: '#/components/schemas/CaslPolicy'
|
||||
* responses:
|
||||
* '200':
|
||||
* description: Updated role.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/Role'
|
||||
* '400':
|
||||
* description: No update fields provided or invalid policy.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/ErrorMessage'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '403':
|
||||
* $ref: '#/components/responses/Forbidden'
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
*/
|
||||
router.put(
|
||||
'/roles/:id',
|
||||
requirePermission('manage', 'all', 'iam.requiresSuperAdminRole'),
|
||||
|
||||
@@ -13,29 +13,321 @@ export const createIngestionRouter = (
|
||||
// Secure all routes in this module
|
||||
router.use(requireAuth(authService));
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/ingestion-sources:
|
||||
* post:
|
||||
* summary: Create an ingestion source
|
||||
* description: Creates a new ingestion source and validates the connection. Returns the created source without credentials. Requires `create:ingestion` permission.
|
||||
* operationId: createIngestionSource
|
||||
* tags:
|
||||
* - Ingestion
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* requestBody:
|
||||
* required: true
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/CreateIngestionSourceDto'
|
||||
* responses:
|
||||
* '201':
|
||||
* description: Ingestion source created successfully.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/SafeIngestionSource'
|
||||
* '400':
|
||||
* description: Invalid input or connection test failed.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/ErrorMessage'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* get:
|
||||
* summary: List ingestion sources
|
||||
* description: Returns all ingestion sources accessible to the authenticated user. Credentials are excluded from the response. Requires `read:ingestion` permission.
|
||||
* operationId: listIngestionSources
|
||||
* tags:
|
||||
* - Ingestion
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* responses:
|
||||
* '200':
|
||||
* description: Array of ingestion sources.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: array
|
||||
* items:
|
||||
* $ref: '#/components/schemas/SafeIngestionSource'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
*/
|
||||
router.post('/', requirePermission('create', 'ingestion'), ingestionController.create);
|
||||
|
||||
router.get('/', requirePermission('read', 'ingestion'), ingestionController.findAll);
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/ingestion-sources/{id}:
|
||||
* get:
|
||||
* summary: Get an ingestion source
|
||||
* description: Returns a single ingestion source by ID. Credentials are excluded. Requires `read:ingestion` permission.
|
||||
* operationId: getIngestionSourceById
|
||||
* tags:
|
||||
* - Ingestion
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* parameters:
|
||||
* - name: id
|
||||
* in: path
|
||||
* required: true
|
||||
* schema:
|
||||
* type: string
|
||||
* example: "clx1y2z3a0000b4d2"
|
||||
* responses:
|
||||
* '200':
|
||||
* description: Ingestion source details.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/SafeIngestionSource'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '404':
|
||||
* $ref: '#/components/responses/NotFound'
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
* put:
|
||||
* summary: Update an ingestion source
|
||||
* description: Updates configuration for an existing ingestion source. Requires `update:ingestion` permission.
|
||||
* operationId: updateIngestionSource
|
||||
* tags:
|
||||
* - Ingestion
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* parameters:
|
||||
* - name: id
|
||||
* in: path
|
||||
* required: true
|
||||
* schema:
|
||||
* type: string
|
||||
* example: "clx1y2z3a0000b4d2"
|
||||
* requestBody:
|
||||
* required: true
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/UpdateIngestionSourceDto'
|
||||
* responses:
|
||||
* '200':
|
||||
* description: Updated ingestion source.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/SafeIngestionSource'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '404':
|
||||
* $ref: '#/components/responses/NotFound'
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
* delete:
|
||||
* summary: Delete an ingestion source
|
||||
* description: Permanently deletes an ingestion source. Deletion must be enabled in system settings. Requires `delete:ingestion` permission.
|
||||
* operationId: deleteIngestionSource
|
||||
* tags:
|
||||
* - Ingestion
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* parameters:
|
||||
* - name: id
|
||||
* in: path
|
||||
* required: true
|
||||
* schema:
|
||||
* type: string
|
||||
* example: "clx1y2z3a0000b4d2"
|
||||
* responses:
|
||||
* '204':
|
||||
* description: Ingestion source deleted. No content returned.
|
||||
* '400':
|
||||
* description: Deletion disabled or constraint error.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/ErrorMessage'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '404':
|
||||
* $ref: '#/components/responses/NotFound'
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
*/
|
||||
router.get('/:id', requirePermission('read', 'ingestion'), ingestionController.findById);
|
||||
|
||||
router.put('/:id', requirePermission('update', 'ingestion'), ingestionController.update);
|
||||
|
||||
router.delete('/:id', requirePermission('delete', 'ingestion'), ingestionController.delete);
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/ingestion-sources/{id}/import:
|
||||
* post:
|
||||
* summary: Trigger initial import
|
||||
* description: Enqueues an initial import job for the ingestion source. This imports all historical emails. Requires `create:ingestion` permission.
|
||||
* operationId: triggerInitialImport
|
||||
* tags:
|
||||
* - Ingestion
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* parameters:
|
||||
* - name: id
|
||||
* in: path
|
||||
* required: true
|
||||
* schema:
|
||||
* type: string
|
||||
* example: "clx1y2z3a0000b4d2"
|
||||
* responses:
|
||||
* '202':
|
||||
* description: Initial import job accepted and queued.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/MessageResponse'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '404':
|
||||
* $ref: '#/components/responses/NotFound'
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
*/
|
||||
router.post(
|
||||
'/:id/import',
|
||||
requirePermission('create', 'ingestion'),
|
||||
ingestionController.triggerInitialImport
|
||||
);
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/ingestion-sources/{id}/pause:
|
||||
* post:
|
||||
* summary: Pause an ingestion source
|
||||
* description: Sets the ingestion source status to `paused`, stopping continuous sync. Requires `update:ingestion` permission.
|
||||
* operationId: pauseIngestionSource
|
||||
* tags:
|
||||
* - Ingestion
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* parameters:
|
||||
* - name: id
|
||||
* in: path
|
||||
* required: true
|
||||
* schema:
|
||||
* type: string
|
||||
* example: "clx1y2z3a0000b4d2"
|
||||
* responses:
|
||||
* '200':
|
||||
* description: Ingestion source paused. Returns the updated source.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/SafeIngestionSource'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '404':
|
||||
* $ref: '#/components/responses/NotFound'
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
*/
|
||||
router.post('/:id/pause', requirePermission('update', 'ingestion'), ingestionController.pause);
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/ingestion-sources/{id}/sync:
|
||||
* post:
|
||||
* summary: Force sync
|
||||
* description: Triggers an out-of-schedule continuous sync for the ingestion source. Requires `sync:ingestion` permission.
|
||||
* operationId: triggerForceSync
|
||||
* tags:
|
||||
* - Ingestion
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* parameters:
|
||||
* - name: id
|
||||
* in: path
|
||||
* required: true
|
||||
* schema:
|
||||
* type: string
|
||||
* example: "clx1y2z3a0000b4d2"
|
||||
* responses:
|
||||
* '202':
|
||||
* description: Force sync job accepted and queued.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/MessageResponse'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '404':
|
||||
* $ref: '#/components/responses/NotFound'
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
*/
|
||||
router.post(
|
||||
'/:id/sync',
|
||||
requirePermission('sync', 'ingestion'),
|
||||
ingestionController.triggerForceSync
|
||||
);
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/ingestion-sources/{id}/unmerge:
|
||||
* post:
|
||||
* summary: Unmerge a child ingestion source
|
||||
* description: Detaches a child source from its merge group, making it a standalone root source. Requires `update:ingestion` permission.
|
||||
* operationId: unmergeIngestionSource
|
||||
* tags:
|
||||
* - Ingestion
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* parameters:
|
||||
* - name: id
|
||||
* in: path
|
||||
* required: true
|
||||
* schema:
|
||||
* type: string
|
||||
* responses:
|
||||
* '200':
|
||||
* description: Source unmerged. Returns the updated source.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/SafeIngestionSource'
|
||||
* '400':
|
||||
* description: Source is not merged into another source.
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '404':
|
||||
* $ref: '#/components/responses/NotFound'
|
||||
*/
|
||||
router.post(
|
||||
'/:id/unmerge',
|
||||
requirePermission('update', 'ingestion'),
|
||||
ingestionController.unmerge
|
||||
);
|
||||
|
||||
return router;
|
||||
};
|
||||
|
||||
@@ -10,6 +10,49 @@ export const integrityRoutes = (authService: AuthService): Router => {
|
||||
|
||||
router.use(requireAuth(authService));
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/integrity/{id}:
|
||||
* get:
|
||||
* summary: Check email integrity
|
||||
* description: Verifies the SHA-256 hash of an archived email and all its attachments against the hashes stored at archival time. Returns per-item integrity results. Requires `read:archive` permission.
|
||||
* operationId: checkIntegrity
|
||||
* tags:
|
||||
* - Integrity
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* parameters:
|
||||
* - name: id
|
||||
* in: path
|
||||
* required: true
|
||||
* description: UUID of the archived email to verify.
|
||||
* schema:
|
||||
* type: string
|
||||
* format: uuid
|
||||
* example: "550e8400-e29b-41d4-a716-446655440000"
|
||||
* responses:
|
||||
* '200':
|
||||
* description: Integrity check results for the email and its attachments.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: array
|
||||
* items:
|
||||
* $ref: '#/components/schemas/IntegrityCheckResult'
|
||||
* '400':
|
||||
* description: Invalid UUID format.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/ValidationError'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '404':
|
||||
* $ref: '#/components/responses/NotFound'
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
*/
|
||||
router.get('/:id', requirePermission('read', 'archive'), controller.checkIntegrity);
|
||||
|
||||
return router;
|
||||
|
||||
@@ -10,11 +10,121 @@ export const createJobsRouter = (authService: AuthService): Router => {
|
||||
|
||||
router.use(requireAuth(authService));
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/jobs/queues:
|
||||
* get:
|
||||
* summary: List all queues
|
||||
* description: Returns all BullMQ job queues and their current job counts broken down by status. Requires `manage:all` (Super Admin) permission.
|
||||
* operationId: getQueues
|
||||
* tags:
|
||||
* - Jobs
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* responses:
|
||||
* '200':
|
||||
* description: List of queue overviews.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: object
|
||||
* properties:
|
||||
* queues:
|
||||
* type: array
|
||||
* items:
|
||||
* $ref: '#/components/schemas/QueueOverview'
|
||||
* example:
|
||||
* queues:
|
||||
* - name: ingestion
|
||||
* counts:
|
||||
* active: 0
|
||||
* completed: 56
|
||||
* failed: 4
|
||||
* delayed: 3
|
||||
* waiting: 0
|
||||
* paused: 0
|
||||
* - name: indexing
|
||||
* counts:
|
||||
* active: 0
|
||||
* completed: 0
|
||||
* failed: 0
|
||||
* delayed: 0
|
||||
* waiting: 0
|
||||
* paused: 0
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '403':
|
||||
* $ref: '#/components/responses/Forbidden'
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
*/
|
||||
router.get(
|
||||
'/queues',
|
||||
requirePermission('manage', 'all', 'user.requiresSuperAdminRole'),
|
||||
jobsController.getQueues
|
||||
);
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/jobs/queues/{queueName}:
|
||||
* get:
|
||||
* summary: Get jobs in a queue
|
||||
* description: Returns a paginated list of jobs within a specific queue, filtered by status. Requires `manage:all` (Super Admin) permission.
|
||||
* operationId: getQueueJobs
|
||||
* tags:
|
||||
* - Jobs
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* parameters:
|
||||
* - name: queueName
|
||||
* in: path
|
||||
* required: true
|
||||
* description: The name of the queue (e.g. `ingestion` or `indexing`).
|
||||
* schema:
|
||||
* type: string
|
||||
* example: ingestion
|
||||
* - name: status
|
||||
* in: query
|
||||
* required: false
|
||||
* description: Filter jobs by status.
|
||||
* schema:
|
||||
* type: string
|
||||
* enum: [active, completed, failed, delayed, waiting, paused]
|
||||
* default: failed
|
||||
* - name: page
|
||||
* in: query
|
||||
* required: false
|
||||
* schema:
|
||||
* type: integer
|
||||
* default: 1
|
||||
* - name: limit
|
||||
* in: query
|
||||
* required: false
|
||||
* schema:
|
||||
* type: integer
|
||||
* default: 10
|
||||
* responses:
|
||||
* '200':
|
||||
* description: Detailed view of the queue including paginated jobs.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/QueueDetails'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '403':
|
||||
* $ref: '#/components/responses/Forbidden'
|
||||
* '404':
|
||||
* description: Queue not found.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/ErrorMessage'
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
*/
|
||||
router.get(
|
||||
'/queues/:queueName',
|
||||
requirePermission('manage', 'all', 'user.requiresSuperAdminRole'),
|
||||
|
||||
@@ -12,6 +12,68 @@ export const createSearchRouter = (
|
||||
|
||||
router.use(requireAuth(authService));
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/search:
|
||||
* get:
|
||||
* summary: Search archived emails
|
||||
* description: Performs a full-text search across indexed archived emails using Meilisearch. Requires `search:archive` permission.
|
||||
* operationId: searchEmails
|
||||
* tags:
|
||||
* - Search
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* parameters:
|
||||
* - name: keywords
|
||||
* in: query
|
||||
* required: true
|
||||
* description: The search query string.
|
||||
* schema:
|
||||
* type: string
|
||||
* example: "invoice Q4"
|
||||
* - name: page
|
||||
* in: query
|
||||
* required: false
|
||||
* description: Page number for pagination.
|
||||
* schema:
|
||||
* type: integer
|
||||
* default: 1
|
||||
* example: 1
|
||||
* - name: limit
|
||||
* in: query
|
||||
* required: false
|
||||
* description: Number of results per page.
|
||||
* schema:
|
||||
* type: integer
|
||||
* default: 10
|
||||
* example: 10
|
||||
* - name: matchingStrategy
|
||||
* in: query
|
||||
* required: false
|
||||
* description: Meilisearch matching strategy. `last` returns results containing at least one keyword; `all` requires all keywords; `frequency` sorts by keyword frequency.
|
||||
* schema:
|
||||
* type: string
|
||||
* enum: [last, all, frequency]
|
||||
* default: last
|
||||
* responses:
|
||||
* '200':
|
||||
* description: Search results.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/SearchResults'
|
||||
* '400':
|
||||
* description: Keywords parameter is required.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/ErrorMessage'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
*/
|
||||
router.get('/', requirePermission('search', 'archive'), searchController.search);
|
||||
|
||||
return router;
|
||||
|
||||
@@ -7,10 +7,56 @@ import { AuthService } from '../../services/AuthService';
|
||||
export const createSettingsRouter = (authService: AuthService): Router => {
|
||||
const router = Router();
|
||||
|
||||
// Public route to get non-sensitive settings. settings read should not be scoped with a permission because all end users need the settings data in the frontend. However, for sensitive settings data, we need to add a new permission subject to limit access. So this route should only expose non-sensitive settings data.
|
||||
/**
|
||||
* @returns SystemSettings
|
||||
* @openapi
|
||||
* /v1/settings/system:
|
||||
* get:
|
||||
* summary: Get system settings
|
||||
* description: >
|
||||
* Returns non-sensitive system settings such as language, timezone, and feature flags.
|
||||
* This endpoint is public — no authentication required. Sensitive settings are never exposed.
|
||||
* operationId: getSystemSettings
|
||||
* tags:
|
||||
* - Settings
|
||||
* responses:
|
||||
* '200':
|
||||
* description: Current system settings.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/SystemSettings'
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
* put:
|
||||
* summary: Update system settings
|
||||
* description: Updates system settings. Requires `manage:settings` permission.
|
||||
* operationId: updateSystemSettings
|
||||
* tags:
|
||||
* - Settings
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* requestBody:
|
||||
* required: true
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/SystemSettings'
|
||||
* responses:
|
||||
* '200':
|
||||
* description: Updated system settings.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/SystemSettings'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '403':
|
||||
* $ref: '#/components/responses/Forbidden'
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
*/
|
||||
// Public route to get non-sensitive settings. All end users need the settings data in the frontend.
|
||||
router.get('/system', settingsController.getSystemSettings);
|
||||
|
||||
// Protected route to update settings
|
||||
|
||||
@@ -13,6 +13,60 @@ export const createStorageRouter = (
|
||||
// Secure all routes in this module
|
||||
router.use(requireAuth(authService));
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/storage/download:
|
||||
* get:
|
||||
* summary: Download a stored file
|
||||
* description: >
|
||||
* Downloads a file from the configured storage backend (local filesystem or S3-compatible).
|
||||
* The path is sanitized to prevent directory traversal attacks.
|
||||
* Requires `read:archive` permission.
|
||||
* operationId: downloadFile
|
||||
* tags:
|
||||
* - Storage
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* parameters:
|
||||
* - name: path
|
||||
* in: query
|
||||
* required: true
|
||||
* description: The relative storage path of the file to download.
|
||||
* schema:
|
||||
* type: string
|
||||
* example: "open-archiver/emails/abc123.eml"
|
||||
* responses:
|
||||
* '200':
|
||||
* description: The file content as a binary stream. The `Content-Disposition` header is set to trigger a browser download.
|
||||
* headers:
|
||||
* Content-Disposition:
|
||||
* description: Attachment filename.
|
||||
* schema:
|
||||
* type: string
|
||||
* example: 'attachment; filename="abc123.eml"'
|
||||
* content:
|
||||
* application/octet-stream:
|
||||
* schema:
|
||||
* type: string
|
||||
* format: binary
|
||||
* '400':
|
||||
* description: File path is required or invalid.
|
||||
* content:
|
||||
* text/plain:
|
||||
* schema:
|
||||
* type: string
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '404':
|
||||
* description: File not found in storage.
|
||||
* content:
|
||||
* text/plain:
|
||||
* schema:
|
||||
* type: string
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
*/
|
||||
router.get('/download', requirePermission('read', 'archive'), storageController.downloadFile);
|
||||
|
||||
return router;
|
||||
|
||||
@@ -9,6 +9,55 @@ export const createUploadRouter = (authService: AuthService): Router => {
|
||||
|
||||
router.use(requireAuth(authService));
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/upload:
|
||||
* post:
|
||||
* summary: Upload a file
|
||||
* description: >
|
||||
* Uploads a file (PST, EML, MBOX, or other) to temporary storage for subsequent use in an ingestion source.
|
||||
* Returns the storage path, which should be passed as `uploadedFilePath` when creating a file-based ingestion source.
|
||||
* Requires `create:ingestion` permission.
|
||||
* operationId: uploadFile
|
||||
* tags:
|
||||
* - Upload
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* requestBody:
|
||||
* required: true
|
||||
* content:
|
||||
* multipart/form-data:
|
||||
* schema:
|
||||
* type: object
|
||||
* properties:
|
||||
* file:
|
||||
* type: string
|
||||
* format: binary
|
||||
* description: The file to upload.
|
||||
* responses:
|
||||
* '200':
|
||||
* description: File uploaded successfully. Returns the storage path.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: object
|
||||
* properties:
|
||||
* filePath:
|
||||
* type: string
|
||||
* description: The storage path of the uploaded file. Use this as `uploadedFilePath` when creating a file-based ingestion source.
|
||||
* example: "open-archiver/tmp/uuid-filename.pst"
|
||||
* '400':
|
||||
* description: Invalid multipart request.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/ErrorMessage'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '500':
|
||||
* $ref: '#/components/responses/InternalServerError'
|
||||
*/
|
||||
router.post('/', requirePermission('create', 'ingestion'), uploadFile);
|
||||
|
||||
return router;
|
||||
|
||||
@@ -9,16 +9,235 @@ export const createUserRouter = (authService: AuthService): Router => {
|
||||
|
||||
router.use(requireAuth(authService));
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/users:
|
||||
* get:
|
||||
* summary: List all users
|
||||
* description: Returns all user accounts in the system. Requires `read:users` permission.
|
||||
* operationId: getUsers
|
||||
* tags:
|
||||
* - Users
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* responses:
|
||||
* '200':
|
||||
* description: List of users.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: array
|
||||
* items:
|
||||
* $ref: '#/components/schemas/User'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
*/
|
||||
router.get('/', requirePermission('read', 'users'), userController.getUsers);
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/users/profile:
|
||||
* get:
|
||||
* summary: Get current user profile
|
||||
* description: Returns the profile of the currently authenticated user.
|
||||
* operationId: getProfile
|
||||
* tags:
|
||||
* - Users
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* responses:
|
||||
* '200':
|
||||
* description: Current user's profile.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/User'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '404':
|
||||
* $ref: '#/components/responses/NotFound'
|
||||
* patch:
|
||||
* summary: Update current user profile
|
||||
* description: Updates the email, first name, or last name of the currently authenticated user. Disabled in demo mode.
|
||||
* operationId: updateProfile
|
||||
* tags:
|
||||
* - Users
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* requestBody:
|
||||
* required: true
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: object
|
||||
* properties:
|
||||
* email:
|
||||
* type: string
|
||||
* format: email
|
||||
* first_name:
|
||||
* type: string
|
||||
* last_name:
|
||||
* type: string
|
||||
* responses:
|
||||
* '200':
|
||||
* description: Updated user profile.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/User'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '403':
|
||||
* description: Disabled in demo mode.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/ErrorMessage'
|
||||
*/
|
||||
router.get('/profile', userController.getProfile);
|
||||
router.patch('/profile', userController.updateProfile);
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/users/profile/password:
|
||||
* post:
|
||||
* summary: Update password
|
||||
* description: Updates the password of the currently authenticated user. The current password must be provided for verification. Disabled in demo mode.
|
||||
* operationId: updatePassword
|
||||
* tags:
|
||||
* - Users
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* requestBody:
|
||||
* required: true
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: object
|
||||
* required:
|
||||
* - currentPassword
|
||||
* - newPassword
|
||||
* properties:
|
||||
* currentPassword:
|
||||
* type: string
|
||||
* format: password
|
||||
* newPassword:
|
||||
* type: string
|
||||
* format: password
|
||||
* responses:
|
||||
* '200':
|
||||
* description: Password updated successfully.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/MessageResponse'
|
||||
* '400':
|
||||
* description: Current password is incorrect.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/ErrorMessage'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '403':
|
||||
* description: Disabled in demo mode.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/ErrorMessage'
|
||||
*/
|
||||
router.post('/profile/password', userController.updatePassword);
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/users/{id}:
|
||||
* get:
|
||||
* summary: Get a user
|
||||
* description: Returns a single user by ID. Requires `read:users` permission.
|
||||
* operationId: getUser
|
||||
* tags:
|
||||
* - Users
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* parameters:
|
||||
* - name: id
|
||||
* in: path
|
||||
* required: true
|
||||
* schema:
|
||||
* type: string
|
||||
* example: "clx1y2z3a0000b4d2"
|
||||
* responses:
|
||||
* '200':
|
||||
* description: User details.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/User'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '404':
|
||||
* $ref: '#/components/responses/NotFound'
|
||||
*/
|
||||
router.get('/:id', requirePermission('read', 'users'), userController.getUser);
|
||||
|
||||
/**
|
||||
* Only super admin has the ability to modify existing users or create new users.
|
||||
* @openapi
|
||||
* /v1/users:
|
||||
* post:
|
||||
* summary: Create a user
|
||||
* description: Creates a new user account and optionally assigns a role. Requires `manage:all` (Super Admin) permission.
|
||||
* operationId: createUser
|
||||
* tags:
|
||||
* - Users
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* requestBody:
|
||||
* required: true
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: object
|
||||
* required:
|
||||
* - email
|
||||
* - first_name
|
||||
* - last_name
|
||||
* - password
|
||||
* properties:
|
||||
* email:
|
||||
* type: string
|
||||
* format: email
|
||||
* example: jane.doe@example.com
|
||||
* first_name:
|
||||
* type: string
|
||||
* example: Jane
|
||||
* last_name:
|
||||
* type: string
|
||||
* example: Doe
|
||||
* password:
|
||||
* type: string
|
||||
* format: password
|
||||
* example: "securepassword123"
|
||||
* roleId:
|
||||
* type: string
|
||||
* description: Optional role ID to assign to the user.
|
||||
* example: "clx1y2z3a0000b4d2"
|
||||
* responses:
|
||||
* '201':
|
||||
* description: User created.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/User'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '403':
|
||||
* $ref: '#/components/responses/Forbidden'
|
||||
*/
|
||||
router.post(
|
||||
'/',
|
||||
@@ -26,12 +245,94 @@ export const createUserRouter = (authService: AuthService): Router => {
|
||||
userController.createUser
|
||||
);
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/users/{id}:
|
||||
* put:
|
||||
* summary: Update a user
|
||||
* description: Updates a user's email, name, or role assignment. Requires `manage:all` (Super Admin) permission.
|
||||
* operationId: updateUser
|
||||
* tags:
|
||||
* - Users
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* parameters:
|
||||
* - name: id
|
||||
* in: path
|
||||
* required: true
|
||||
* schema:
|
||||
* type: string
|
||||
* example: "clx1y2z3a0000b4d2"
|
||||
* requestBody:
|
||||
* required: true
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* type: object
|
||||
* properties:
|
||||
* email:
|
||||
* type: string
|
||||
* format: email
|
||||
* first_name:
|
||||
* type: string
|
||||
* last_name:
|
||||
* type: string
|
||||
* roleId:
|
||||
* type: string
|
||||
* responses:
|
||||
* '200':
|
||||
* description: Updated user.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/User'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '403':
|
||||
* $ref: '#/components/responses/Forbidden'
|
||||
* '404':
|
||||
* $ref: '#/components/responses/NotFound'
|
||||
*/
|
||||
router.put(
|
||||
'/:id',
|
||||
requirePermission('manage', 'all', 'user.requiresSuperAdminRole'),
|
||||
userController.updateUser
|
||||
);
|
||||
|
||||
/**
|
||||
* @openapi
|
||||
* /v1/users/{id}:
|
||||
* delete:
|
||||
* summary: Delete a user
|
||||
* description: Permanently deletes a user. Cannot delete the last remaining user. Requires `manage:all` (Super Admin) permission.
|
||||
* operationId: deleteUser
|
||||
* tags:
|
||||
* - Users
|
||||
* security:
|
||||
* - bearerAuth: []
|
||||
* - apiKeyAuth: []
|
||||
* parameters:
|
||||
* - name: id
|
||||
* in: path
|
||||
* required: true
|
||||
* schema:
|
||||
* type: string
|
||||
* example: "clx1y2z3a0000b4d2"
|
||||
* responses:
|
||||
* '204':
|
||||
* description: User deleted. No content returned.
|
||||
* '400':
|
||||
* description: Cannot delete the only remaining user.
|
||||
* content:
|
||||
* application/json:
|
||||
* schema:
|
||||
* $ref: '#/components/schemas/ErrorMessage'
|
||||
* '401':
|
||||
* $ref: '#/components/responses/Unauthorized'
|
||||
* '403':
|
||||
* $ref: '#/components/responses/Forbidden'
|
||||
*/
|
||||
router.delete(
|
||||
'/:id',
|
||||
requirePermission('manage', 'all', 'user.requiresSuperAdminRole'),
|
||||
|
||||
@@ -158,13 +158,12 @@ export async function createServer(modules: ArchiverModule[] = []): Promise<Expr
|
||||
// Load all provided extension modules
|
||||
for (const module of modules) {
|
||||
await module.initialize(app, authService);
|
||||
console.log(`🏢 Enterprise module loaded: ${module.name}`);
|
||||
logger.info(`🏢 Enterprise module loaded: ${module.name}`);
|
||||
}
|
||||
app.get('/', (req, res) => {
|
||||
res.send('Backend is running!!');
|
||||
});
|
||||
|
||||
console.log('✅ Core OSS modules loaded.');
|
||||
logger.info('✅ Core OSS modules loaded.');
|
||||
|
||||
return app;
|
||||
}
|
||||
|
||||
@@ -7,4 +7,5 @@ export const app = {
|
||||
syncFrequency: process.env.SYNC_FREQUENCY || '* * * * *', //default to 1 minute
|
||||
enableDeletion: process.env.ENABLE_DELETION === 'true',
|
||||
allInclusiveArchive: process.env.ALL_INCLUSIVE_ARCHIVE === 'true',
|
||||
isDemo: process.env.IS_DEMO === 'true',
|
||||
};
|
||||
|
||||
@@ -0,0 +1,51 @@
|
||||
CREATE TABLE "email_legal_holds" (
|
||||
"email_id" uuid NOT NULL,
|
||||
"legal_hold_id" uuid NOT NULL,
|
||||
CONSTRAINT "email_legal_holds_email_id_legal_hold_id_pk" PRIMARY KEY("email_id","legal_hold_id")
|
||||
);
|
||||
--> statement-breakpoint
|
||||
CREATE TABLE "email_retention_labels" (
|
||||
"email_id" uuid NOT NULL,
|
||||
"label_id" uuid NOT NULL,
|
||||
"applied_at" timestamp with time zone DEFAULT now() NOT NULL,
|
||||
"applied_by_user_id" uuid,
|
||||
CONSTRAINT "email_retention_labels_email_id_label_id_pk" PRIMARY KEY("email_id","label_id")
|
||||
);
|
||||
--> statement-breakpoint
|
||||
CREATE TABLE "retention_events" (
|
||||
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
|
||||
"event_name" varchar(255) NOT NULL,
|
||||
"event_type" varchar(100) NOT NULL,
|
||||
"event_timestamp" timestamp with time zone NOT NULL,
|
||||
"target_criteria" jsonb NOT NULL,
|
||||
"created_at" timestamp with time zone DEFAULT now() NOT NULL
|
||||
);
|
||||
--> statement-breakpoint
|
||||
CREATE TABLE "retention_labels" (
|
||||
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
|
||||
"name" varchar(255) NOT NULL,
|
||||
"retention_period_days" integer NOT NULL,
|
||||
"description" text,
|
||||
"created_at" timestamp with time zone DEFAULT now() NOT NULL
|
||||
);
|
||||
--> statement-breakpoint
|
||||
ALTER TABLE "legal_holds" DROP CONSTRAINT "legal_holds_custodian_id_custodians_id_fk";
|
||||
--> statement-breakpoint
|
||||
ALTER TABLE "legal_holds" DROP CONSTRAINT "legal_holds_case_id_ediscovery_cases_id_fk";
|
||||
--> statement-breakpoint
|
||||
ALTER TABLE "legal_holds" ALTER COLUMN "case_id" DROP NOT NULL;--> statement-breakpoint
|
||||
ALTER TABLE "legal_holds" ADD COLUMN "name" varchar(255) NOT NULL;--> statement-breakpoint
|
||||
ALTER TABLE "legal_holds" ADD COLUMN "is_active" boolean DEFAULT true NOT NULL;--> statement-breakpoint
|
||||
ALTER TABLE "legal_holds" ADD COLUMN "created_at" timestamp with time zone DEFAULT now() NOT NULL;--> statement-breakpoint
|
||||
ALTER TABLE "legal_holds" ADD COLUMN "updated_at" timestamp with time zone DEFAULT now() NOT NULL;--> statement-breakpoint
|
||||
ALTER TABLE "email_legal_holds" ADD CONSTRAINT "email_legal_holds_email_id_archived_emails_id_fk" FOREIGN KEY ("email_id") REFERENCES "public"."archived_emails"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
|
||||
ALTER TABLE "email_legal_holds" ADD CONSTRAINT "email_legal_holds_legal_hold_id_legal_holds_id_fk" FOREIGN KEY ("legal_hold_id") REFERENCES "public"."legal_holds"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
|
||||
ALTER TABLE "email_retention_labels" ADD CONSTRAINT "email_retention_labels_email_id_archived_emails_id_fk" FOREIGN KEY ("email_id") REFERENCES "public"."archived_emails"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
|
||||
ALTER TABLE "email_retention_labels" ADD CONSTRAINT "email_retention_labels_label_id_retention_labels_id_fk" FOREIGN KEY ("label_id") REFERENCES "public"."retention_labels"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
|
||||
ALTER TABLE "email_retention_labels" ADD CONSTRAINT "email_retention_labels_applied_by_user_id_users_id_fk" FOREIGN KEY ("applied_by_user_id") REFERENCES "public"."users"("id") ON DELETE no action ON UPDATE no action;--> statement-breakpoint
|
||||
ALTER TABLE "legal_holds" ADD CONSTRAINT "legal_holds_case_id_ediscovery_cases_id_fk" FOREIGN KEY ("case_id") REFERENCES "public"."ediscovery_cases"("id") ON DELETE set null ON UPDATE no action;--> statement-breakpoint
|
||||
ALTER TABLE "legal_holds" DROP COLUMN "custodian_id";--> statement-breakpoint
|
||||
ALTER TABLE "legal_holds" DROP COLUMN "hold_criteria";--> statement-breakpoint
|
||||
ALTER TABLE "legal_holds" DROP COLUMN "applied_by_identifier";--> statement-breakpoint
|
||||
ALTER TABLE "legal_holds" DROP COLUMN "applied_at";--> statement-breakpoint
|
||||
ALTER TABLE "legal_holds" DROP COLUMN "removed_at";
|
||||
@@ -0,0 +1,3 @@
|
||||
ALTER TYPE "public"."audit_log_target_type" ADD VALUE 'RetentionPolicy' BEFORE 'Role';--> statement-breakpoint
|
||||
ALTER TYPE "public"."audit_log_target_type" ADD VALUE 'SystemEvent' BEFORE 'SystemSettings';--> statement-breakpoint
|
||||
ALTER TABLE "retention_policies" ADD COLUMN "ingestion_scope" jsonb DEFAULT 'null'::jsonb;
|
||||
@@ -0,0 +1,6 @@
|
||||
ALTER TYPE "public"."audit_log_target_type" ADD VALUE 'RetentionLabel' BEFORE 'Role';--> statement-breakpoint
|
||||
ALTER TYPE "public"."audit_log_target_type" ADD VALUE 'LegalHold' BEFORE 'Role';--> statement-breakpoint
|
||||
ALTER TABLE "email_legal_holds" ADD COLUMN "applied_at" timestamp with time zone DEFAULT now() NOT NULL;--> statement-breakpoint
|
||||
ALTER TABLE "email_legal_holds" ADD COLUMN "applied_by_user_id" uuid;--> statement-breakpoint
|
||||
ALTER TABLE "retention_labels" ADD COLUMN "is_disabled" boolean DEFAULT false NOT NULL;--> statement-breakpoint
|
||||
ALTER TABLE "email_legal_holds" ADD CONSTRAINT "email_legal_holds_applied_by_user_id_users_id_fk" FOREIGN KEY ("applied_by_user_id") REFERENCES "public"."users"("id") ON DELETE no action ON UPDATE no action;
|
||||
@@ -0,0 +1,12 @@
|
||||
CREATE TABLE "sync_sessions" (
|
||||
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
|
||||
"ingestion_source_id" uuid NOT NULL,
|
||||
"is_initial_import" boolean DEFAULT false NOT NULL,
|
||||
"total_mailboxes" integer DEFAULT 0 NOT NULL,
|
||||
"completed_mailboxes" integer DEFAULT 0 NOT NULL,
|
||||
"failed_mailboxes" integer DEFAULT 0 NOT NULL,
|
||||
"error_messages" text[] DEFAULT '{}' NOT NULL,
|
||||
"created_at" timestamp with time zone DEFAULT now() NOT NULL
|
||||
);
|
||||
--> statement-breakpoint
|
||||
ALTER TABLE "sync_sessions" ADD CONSTRAINT "sync_sessions_ingestion_source_id_ingestion_sources_id_fk" FOREIGN KEY ("ingestion_source_id") REFERENCES "public"."ingestion_sources"("id") ON DELETE cascade ON UPDATE no action;
|
||||
@@ -0,0 +1 @@
|
||||
ALTER TABLE "sync_sessions" ADD COLUMN "last_activity_at" timestamp with time zone DEFAULT now() NOT NULL;
|
||||
@@ -0,0 +1,2 @@
|
||||
ALTER TABLE "archived_emails" ADD COLUMN "provider_message_id" text;--> statement-breakpoint
|
||||
CREATE INDEX "provider_msg_source_idx" ON "archived_emails" USING btree ("provider_message_id","ingestion_source_id");
|
||||
@@ -0,0 +1,20 @@
|
||||
CREATE TYPE "public"."journaling_source_status" AS ENUM('active', 'paused');--> statement-breakpoint
|
||||
ALTER TYPE "public"."ingestion_provider" ADD VALUE 'smtp_journaling';--> statement-breakpoint
|
||||
ALTER TYPE "public"."audit_log_target_type" ADD VALUE 'JournalingSource' BEFORE 'RetentionPolicy';--> statement-breakpoint
|
||||
CREATE TABLE "journaling_sources" (
|
||||
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
|
||||
"name" text NOT NULL,
|
||||
"allowed_ips" jsonb NOT NULL,
|
||||
"require_tls" boolean DEFAULT true NOT NULL,
|
||||
"smtp_username" text,
|
||||
"smtp_password_hash" text,
|
||||
"status" "journaling_source_status" DEFAULT 'active' NOT NULL,
|
||||
"ingestion_source_id" uuid NOT NULL,
|
||||
"routing_address" text NOT NULL,
|
||||
"total_received" integer DEFAULT 0 NOT NULL,
|
||||
"last_received_at" timestamp with time zone,
|
||||
"created_at" timestamp with time zone DEFAULT now() NOT NULL,
|
||||
"updated_at" timestamp with time zone DEFAULT now() NOT NULL
|
||||
);
|
||||
--> statement-breakpoint
|
||||
ALTER TABLE "journaling_sources" ADD CONSTRAINT "journaling_sources_ingestion_source_id_ingestion_sources_id_fk" FOREIGN KEY ("ingestion_source_id") REFERENCES "public"."ingestion_sources"("id") ON DELETE cascade ON UPDATE no action;
|
||||
@@ -0,0 +1 @@
|
||||
ALTER TABLE "ingestion_sources" ADD COLUMN "preserve_original_file" boolean DEFAULT false NOT NULL;
|
||||
@@ -0,0 +1,3 @@
|
||||
ALTER TABLE "archived_emails" ADD COLUMN "is_journaled" boolean DEFAULT false;--> statement-breakpoint
|
||||
ALTER TABLE "ingestion_sources" ADD COLUMN "merged_into_id" uuid;--> statement-breakpoint
|
||||
CREATE INDEX "idx_merged_into" ON "ingestion_sources" USING btree ("merged_into_id");
|
||||
@@ -0,0 +1 @@
|
||||
ALTER TABLE "ingestion_sources" ADD CONSTRAINT "ingestion_sources_merged_into_id_ingestion_sources_id_fk" FOREIGN KEY ("merged_into_id") REFERENCES "public"."ingestion_sources"("id") ON DELETE set null ON UPDATE no action;
|
||||
@@ -0,0 +1 @@
|
||||
ALTER TYPE "public"."ingestion_status" ADD VALUE 'partially_active';
|
||||
1460
packages/backend/src/database/migrations/meta/0024_snapshot.json
Normal file
1460
packages/backend/src/database/migrations/meta/0024_snapshot.json
Normal file
File diff suppressed because it is too large
Load Diff
1469
packages/backend/src/database/migrations/meta/0025_snapshot.json
Normal file
1469
packages/backend/src/database/migrations/meta/0025_snapshot.json
Normal file
File diff suppressed because it is too large
Load Diff
1500
packages/backend/src/database/migrations/meta/0026_snapshot.json
Normal file
1500
packages/backend/src/database/migrations/meta/0026_snapshot.json
Normal file
File diff suppressed because it is too large
Load Diff
1578
packages/backend/src/database/migrations/meta/0027_snapshot.json
Normal file
1578
packages/backend/src/database/migrations/meta/0027_snapshot.json
Normal file
File diff suppressed because it is too large
Load Diff
1585
packages/backend/src/database/migrations/meta/0028_snapshot.json
Normal file
1585
packages/backend/src/database/migrations/meta/0028_snapshot.json
Normal file
File diff suppressed because it is too large
Load Diff
1612
packages/backend/src/database/migrations/meta/0029_snapshot.json
Normal file
1612
packages/backend/src/database/migrations/meta/0029_snapshot.json
Normal file
File diff suppressed because it is too large
Load Diff
1727
packages/backend/src/database/migrations/meta/0030_snapshot.json
Normal file
1727
packages/backend/src/database/migrations/meta/0030_snapshot.json
Normal file
File diff suppressed because it is too large
Load Diff
1734
packages/backend/src/database/migrations/meta/0031_snapshot.json
Normal file
1734
packages/backend/src/database/migrations/meta/0031_snapshot.json
Normal file
File diff suppressed because it is too large
Load Diff
1763
packages/backend/src/database/migrations/meta/0032_snapshot.json
Normal file
1763
packages/backend/src/database/migrations/meta/0032_snapshot.json
Normal file
File diff suppressed because it is too large
Load Diff
1772
packages/backend/src/database/migrations/meta/0033_snapshot.json
Normal file
1772
packages/backend/src/database/migrations/meta/0033_snapshot.json
Normal file
File diff suppressed because it is too large
Load Diff
1773
packages/backend/src/database/migrations/meta/0034_snapshot.json
Normal file
1773
packages/backend/src/database/migrations/meta/0034_snapshot.json
Normal file
File diff suppressed because it is too large
Load Diff
@@ -169,6 +169,83 @@
|
||||
"when": 1760354094610,
|
||||
"tag": "0023_swift_swordsman",
|
||||
"breakpoints": true
|
||||
},
|
||||
{
|
||||
"idx": 24,
|
||||
"version": "7",
|
||||
"when": 1772842674479,
|
||||
"tag": "0024_careful_black_panther",
|
||||
"breakpoints": true
|
||||
},
|
||||
{
|
||||
"idx": 25,
|
||||
"version": "7",
|
||||
"when": 1773013461190,
|
||||
"tag": "0025_peaceful_grim_reaper",
|
||||
"breakpoints": true
|
||||
},
|
||||
{
|
||||
"idx": 26,
|
||||
"version": "7",
|
||||
"when": 1773326266420,
|
||||
"tag": "0026_pink_fantastic_four",
|
||||
"breakpoints": true
|
||||
},
|
||||
{
|
||||
"idx": 27,
|
||||
"version": "7",
|
||||
"when": 1773768709477,
|
||||
"tag": "0027_black_morph",
|
||||
"breakpoints": true
|
||||
},
|
||||
{
|
||||
"idx": 28,
|
||||
"version": "7",
|
||||
"when": 1773770326402,
|
||||
"tag": "0028_youthful_kitty_pryde",
|
||||
"breakpoints": true
|
||||
},
|
||||
{
|
||||
"idx": 29,
|
||||
"version": "7",
|
||||
"when": 1773927678269,
|
||||
"tag": "0029_lethal_brood",
|
||||
"breakpoints": true
|
||||
},
|
||||
{
|
||||
"idx": 30,
|
||||
"version": "7",
|
||||
"when": 1774440788278,
|
||||
"tag": "0030_strong_ultron",
|
||||
"breakpoints": true
|
||||
},
|
||||
{
|
||||
"idx": 31,
|
||||
"version": "7",
|
||||
"when": 1774623960683,
|
||||
"tag": "0031_bouncy_boomerang",
|
||||
"breakpoints": true
|
||||
},
|
||||
{
|
||||
"idx": 32,
|
||||
"version": "7",
|
||||
"when": 1774709286830,
|
||||
"tag": "0032_exotic_the_twelve",
|
||||
"breakpoints": true
|
||||
},
|
||||
{
|
||||
"idx": 33,
|
||||
"version": "7",
|
||||
"when": 1774719684064,
|
||||
"tag": "0033_adorable_lockheed",
|
||||
"breakpoints": true
|
||||
},
|
||||
{
|
||||
"idx": 34,
|
||||
"version": "7",
|
||||
"when": 1774900882674,
|
||||
"tag": "0034_stiff_toad",
|
||||
"breakpoints": true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -9,3 +9,5 @@ export * from './schema/system-settings';
|
||||
export * from './schema/api-keys';
|
||||
export * from './schema/audit-logs';
|
||||
export * from './schema/enums';
|
||||
export * from './schema/sync-sessions';
|
||||
export * from './schema/journaling-sources';
|
||||
|
||||
@@ -12,6 +12,9 @@ export const archivedEmails = pgTable(
|
||||
.references(() => ingestionSources.id, { onDelete: 'cascade' }),
|
||||
userEmail: text('user_email').notNull(),
|
||||
messageIdHeader: text('message_id_header'),
|
||||
/** The provider-specific message ID (e.g., Gmail API ID, Graph API ID).
|
||||
* Used by the pre-fetch duplicate check to avoid unnecessary API calls during retries. */
|
||||
providerMessageId: text('provider_message_id'),
|
||||
sentAt: timestamp('sent_at', { withTimezone: true }).notNull(),
|
||||
subject: text('subject'),
|
||||
senderName: text('sender_name'),
|
||||
@@ -23,11 +26,15 @@ export const archivedEmails = pgTable(
|
||||
isIndexed: boolean('is_indexed').notNull().default(false),
|
||||
hasAttachments: boolean('has_attachments').notNull().default(false),
|
||||
isOnLegalHold: boolean('is_on_legal_hold').notNull().default(false),
|
||||
isJournaled: boolean('is_journaled').default(false),
|
||||
archivedAt: timestamp('archived_at', { withTimezone: true }).notNull().defaultNow(),
|
||||
path: text('path'),
|
||||
tags: jsonb('tags'),
|
||||
},
|
||||
(table) => [index('thread_id_idx').on(table.threadId)]
|
||||
(table) => [
|
||||
index('thread_id_idx').on(table.threadId),
|
||||
index('provider_msg_source_idx').on(table.providerMessageId, table.ingestionSourceId),
|
||||
]
|
||||
);
|
||||
|
||||
export const archivedEmailsRelations = relations(archivedEmails, ({ one }) => ({
|
||||
|
||||
@@ -5,11 +5,14 @@ import {
|
||||
jsonb,
|
||||
pgEnum,
|
||||
pgTable,
|
||||
primaryKey,
|
||||
text,
|
||||
timestamp,
|
||||
uuid,
|
||||
varchar,
|
||||
} from 'drizzle-orm/pg-core';
|
||||
import { custodians } from './custodians';
|
||||
import { archivedEmails } from './archived-emails';
|
||||
import { users } from './users';
|
||||
|
||||
// --- Enums ---
|
||||
|
||||
@@ -29,10 +32,48 @@ export const retentionPolicies = pgTable('retention_policies', {
|
||||
actionOnExpiry: retentionActionEnum('action_on_expiry').notNull(),
|
||||
isEnabled: boolean('is_enabled').notNull().default(true),
|
||||
conditions: jsonb('conditions'),
|
||||
/**
|
||||
* Array of ingestion source UUIDs this policy is restricted to.
|
||||
* null means the policy applies to all ingestion sources.
|
||||
*/
|
||||
ingestionScope: jsonb('ingestion_scope').$type<string[] | null>().default(null),
|
||||
createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
|
||||
updatedAt: timestamp('updated_at', { withTimezone: true }).notNull().defaultNow(),
|
||||
});
|
||||
|
||||
export const retentionLabels = pgTable('retention_labels', {
|
||||
id: uuid('id').defaultRandom().primaryKey(),
|
||||
name: varchar('name', { length: 255 }).notNull(),
|
||||
retentionPeriodDays: integer('retention_period_days').notNull(),
|
||||
description: text('description'),
|
||||
isDisabled: boolean('is_disabled').notNull().default(false),
|
||||
createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
|
||||
});
|
||||
|
||||
export const emailRetentionLabels = pgTable(
|
||||
'email_retention_labels',
|
||||
{
|
||||
emailId: uuid('email_id')
|
||||
.references(() => archivedEmails.id, { onDelete: 'cascade' })
|
||||
.notNull(),
|
||||
labelId: uuid('label_id')
|
||||
.references(() => retentionLabels.id, { onDelete: 'cascade' })
|
||||
.notNull(),
|
||||
appliedAt: timestamp('applied_at', { withTimezone: true }).notNull().defaultNow(),
|
||||
appliedByUserId: uuid('applied_by_user_id').references(() => users.id),
|
||||
},
|
||||
(t) => [primaryKey({ columns: [t.emailId, t.labelId] })]
|
||||
);
|
||||
|
||||
export const retentionEvents = pgTable('retention_events', {
|
||||
id: uuid('id').defaultRandom().primaryKey(),
|
||||
eventName: varchar('event_name', { length: 255 }).notNull(),
|
||||
eventType: varchar('event_type', { length: 100 }).notNull(),
|
||||
eventTimestamp: timestamp('event_timestamp', { withTimezone: true }).notNull(),
|
||||
targetCriteria: jsonb('target_criteria').notNull(),
|
||||
createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
|
||||
});
|
||||
|
||||
export const ediscoveryCases = pgTable('ediscovery_cases', {
|
||||
id: uuid('id').primaryKey().defaultRandom(),
|
||||
name: text('name').notNull().unique(),
|
||||
@@ -44,18 +85,31 @@ export const ediscoveryCases = pgTable('ediscovery_cases', {
|
||||
});
|
||||
|
||||
export const legalHolds = pgTable('legal_holds', {
|
||||
id: uuid('id').primaryKey().defaultRandom(),
|
||||
caseId: uuid('case_id')
|
||||
.notNull()
|
||||
.references(() => ediscoveryCases.id, { onDelete: 'cascade' }),
|
||||
custodianId: uuid('custodian_id').references(() => custodians.id, { onDelete: 'cascade' }),
|
||||
holdCriteria: jsonb('hold_criteria'),
|
||||
id: uuid('id').defaultRandom().primaryKey(),
|
||||
name: varchar('name', { length: 255 }).notNull(),
|
||||
reason: text('reason'),
|
||||
appliedByIdentifier: text('applied_by_identifier').notNull(),
|
||||
appliedAt: timestamp('applied_at', { withTimezone: true }).notNull().defaultNow(),
|
||||
removedAt: timestamp('removed_at', { withTimezone: true }),
|
||||
isActive: boolean('is_active').notNull().default(true),
|
||||
// Optional link to ediscovery cases for backward compatibility or future use
|
||||
caseId: uuid('case_id').references(() => ediscoveryCases.id, { onDelete: 'set null' }),
|
||||
createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
|
||||
updatedAt: timestamp('updated_at', { withTimezone: true }).notNull().defaultNow(),
|
||||
});
|
||||
|
||||
export const emailLegalHolds = pgTable(
|
||||
'email_legal_holds',
|
||||
{
|
||||
emailId: uuid('email_id')
|
||||
.references(() => archivedEmails.id, { onDelete: 'cascade' })
|
||||
.notNull(),
|
||||
legalHoldId: uuid('legal_hold_id')
|
||||
.references(() => legalHolds.id, { onDelete: 'cascade' })
|
||||
.notNull(),
|
||||
appliedAt: timestamp('applied_at', { withTimezone: true }).notNull().defaultNow(),
|
||||
appliedByUserId: uuid('applied_by_user_id').references(() => users.id),
|
||||
},
|
||||
(t) => [primaryKey({ columns: [t.emailId, t.legalHoldId] })]
|
||||
);
|
||||
|
||||
export const exportJobs = pgTable('export_jobs', {
|
||||
id: uuid('id').primaryKey().defaultRandom(),
|
||||
caseId: uuid('case_id').references(() => ediscoveryCases.id, { onDelete: 'set null' }),
|
||||
@@ -70,20 +124,51 @@ export const exportJobs = pgTable('export_jobs', {
|
||||
|
||||
// --- Relations ---
|
||||
|
||||
export const ediscoveryCasesRelations = relations(ediscoveryCases, ({ many }) => ({
|
||||
legalHolds: many(legalHolds),
|
||||
exportJobs: many(exportJobs),
|
||||
export const retentionPoliciesRelations = relations(retentionPolicies, ({ many }) => ({
|
||||
// Add relations if needed
|
||||
}));
|
||||
|
||||
export const legalHoldsRelations = relations(legalHolds, ({ one }) => ({
|
||||
export const retentionLabelsRelations = relations(retentionLabels, ({ many }) => ({
|
||||
emailRetentionLabels: many(emailRetentionLabels),
|
||||
}));
|
||||
|
||||
export const emailRetentionLabelsRelations = relations(emailRetentionLabels, ({ one }) => ({
|
||||
label: one(retentionLabels, {
|
||||
fields: [emailRetentionLabels.labelId],
|
||||
references: [retentionLabels.id],
|
||||
}),
|
||||
email: one(archivedEmails, {
|
||||
fields: [emailRetentionLabels.emailId],
|
||||
references: [archivedEmails.id],
|
||||
}),
|
||||
appliedByUser: one(users, {
|
||||
fields: [emailRetentionLabels.appliedByUserId],
|
||||
references: [users.id],
|
||||
}),
|
||||
}));
|
||||
|
||||
export const legalHoldsRelations = relations(legalHolds, ({ one, many }) => ({
|
||||
emailLegalHolds: many(emailLegalHolds),
|
||||
ediscoveryCase: one(ediscoveryCases, {
|
||||
fields: [legalHolds.caseId],
|
||||
references: [ediscoveryCases.id],
|
||||
}),
|
||||
custodian: one(custodians, {
|
||||
fields: [legalHolds.custodianId],
|
||||
references: [custodians.id],
|
||||
}));
|
||||
|
||||
export const emailLegalHoldsRelations = relations(emailLegalHolds, ({ one }) => ({
|
||||
legalHold: one(legalHolds, {
|
||||
fields: [emailLegalHolds.legalHoldId],
|
||||
references: [legalHolds.id],
|
||||
}),
|
||||
email: one(archivedEmails, {
|
||||
fields: [emailLegalHolds.emailId],
|
||||
references: [archivedEmails.id],
|
||||
}),
|
||||
}));
|
||||
|
||||
export const ediscoveryCasesRelations = relations(ediscoveryCases, ({ many }) => ({
|
||||
legalHolds: many(legalHolds),
|
||||
exportJobs: many(exportJobs),
|
||||
}));
|
||||
|
||||
export const exportJobsRelations = relations(exportJobs, ({ one }) => ({
|
||||
|
||||
@@ -1,4 +1,14 @@
|
||||
import { jsonb, pgEnum, pgTable, text, timestamp, uuid } from 'drizzle-orm/pg-core';
|
||||
import {
|
||||
boolean,
|
||||
index,
|
||||
jsonb,
|
||||
pgEnum,
|
||||
pgTable,
|
||||
text,
|
||||
timestamp,
|
||||
uuid,
|
||||
type AnyPgColumn,
|
||||
} from 'drizzle-orm/pg-core';
|
||||
import { users } from './users';
|
||||
import { relations } from 'drizzle-orm';
|
||||
|
||||
@@ -9,6 +19,7 @@ export const ingestionProviderEnum = pgEnum('ingestion_provider', [
|
||||
'pst_import',
|
||||
'eml_import',
|
||||
'mbox_import',
|
||||
'smtp_journaling',
|
||||
]);
|
||||
|
||||
export const ingestionStatusEnum = pgEnum('ingestion_status', [
|
||||
@@ -20,26 +31,47 @@ export const ingestionStatusEnum = pgEnum('ingestion_status', [
|
||||
'importing',
|
||||
'auth_success',
|
||||
'imported',
|
||||
'partially_active',
|
||||
]);
|
||||
|
||||
export const ingestionSources = pgTable('ingestion_sources', {
|
||||
id: uuid('id').primaryKey().defaultRandom(),
|
||||
userId: uuid('user_id').references(() => users.id, { onDelete: 'cascade' }),
|
||||
name: text('name').notNull(),
|
||||
provider: ingestionProviderEnum('provider').notNull(),
|
||||
credentials: text('credentials'),
|
||||
status: ingestionStatusEnum('status').notNull().default('pending_auth'),
|
||||
lastSyncStartedAt: timestamp('last_sync_started_at', { withTimezone: true }),
|
||||
lastSyncFinishedAt: timestamp('last_sync_finished_at', { withTimezone: true }),
|
||||
lastSyncStatusMessage: text('last_sync_status_message'),
|
||||
syncState: jsonb('sync_state'),
|
||||
createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
|
||||
updatedAt: timestamp('updated_at', { withTimezone: true }).notNull().defaultNow(),
|
||||
});
|
||||
export const ingestionSources = pgTable(
|
||||
'ingestion_sources',
|
||||
{
|
||||
id: uuid('id').primaryKey().defaultRandom(),
|
||||
userId: uuid('user_id').references(() => users.id, { onDelete: 'cascade' }),
|
||||
name: text('name').notNull(),
|
||||
provider: ingestionProviderEnum('provider').notNull(),
|
||||
credentials: text('credentials'),
|
||||
status: ingestionStatusEnum('status').notNull().default('pending_auth'),
|
||||
lastSyncStartedAt: timestamp('last_sync_started_at', { withTimezone: true }),
|
||||
lastSyncFinishedAt: timestamp('last_sync_finished_at', { withTimezone: true }),
|
||||
lastSyncStatusMessage: text('last_sync_status_message'),
|
||||
syncState: jsonb('sync_state'),
|
||||
preserveOriginalFile: boolean('preserve_original_file').notNull().default(false),
|
||||
/** Self-referencing FK for merge groups. When set, this source is a child
|
||||
* whose emails are logically grouped with the root source. Flat hierarchy only. */
|
||||
mergedIntoId: uuid('merged_into_id').references((): AnyPgColumn => ingestionSources.id, {
|
||||
onDelete: 'set null',
|
||||
}),
|
||||
createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
|
||||
updatedAt: timestamp('updated_at', { withTimezone: true }).notNull().defaultNow(),
|
||||
},
|
||||
(table) => [index('idx_merged_into').on(table.mergedIntoId)]
|
||||
);
|
||||
|
||||
export const ingestionSourcesRelations = relations(ingestionSources, ({ one }) => ({
|
||||
export const ingestionSourcesRelations = relations(ingestionSources, ({ one, many }) => ({
|
||||
user: one(users, {
|
||||
fields: [ingestionSources.userId],
|
||||
references: [users.id],
|
||||
}),
|
||||
/** The root source this child is merged into (null if this is a root). */
|
||||
mergedInto: one(ingestionSources, {
|
||||
fields: [ingestionSources.mergedIntoId],
|
||||
references: [ingestionSources.id],
|
||||
relationName: 'mergedChildren',
|
||||
}),
|
||||
/** Child sources that are merged into this root. */
|
||||
children: many(ingestionSources, {
|
||||
relationName: 'mergedChildren',
|
||||
}),
|
||||
}));
|
||||
|
||||
47
packages/backend/src/database/schema/journaling-sources.ts
Normal file
47
packages/backend/src/database/schema/journaling-sources.ts
Normal file
@@ -0,0 +1,47 @@
|
||||
import {
|
||||
boolean,
|
||||
integer,
|
||||
jsonb,
|
||||
pgEnum,
|
||||
pgTable,
|
||||
text,
|
||||
timestamp,
|
||||
uuid,
|
||||
} from 'drizzle-orm/pg-core';
|
||||
import { relations } from 'drizzle-orm';
|
||||
import { ingestionSources } from './ingestion-sources';
|
||||
|
||||
export const journalingSourceStatusEnum = pgEnum('journaling_source_status', ['active', 'paused']);
|
||||
|
||||
export const journalingSources = pgTable('journaling_sources', {
|
||||
id: uuid('id').primaryKey().defaultRandom(),
|
||||
name: text('name').notNull(),
|
||||
/** CIDR blocks or IP addresses allowed to send journal reports */
|
||||
allowedIps: jsonb('allowed_ips').notNull().$type<string[]>(),
|
||||
/** Whether to reject non-TLS connections (GDPR compliance) */
|
||||
requireTls: boolean('require_tls').notNull().default(true),
|
||||
/** Optional SMTP AUTH username */
|
||||
smtpUsername: text('smtp_username'),
|
||||
/** Bcrypt-hashed SMTP AUTH password */
|
||||
smtpPasswordHash: text('smtp_password_hash'),
|
||||
status: journalingSourceStatusEnum('status').notNull().default('active'),
|
||||
/** The backing ingestion source that owns all archived emails */
|
||||
ingestionSourceId: uuid('ingestion_source_id')
|
||||
.notNull()
|
||||
.references(() => ingestionSources.id, { onDelete: 'cascade' }),
|
||||
/** Persisted SMTP routing address generated at creation time (immutable unless regenerated) */
|
||||
routingAddress: text('routing_address').notNull(),
|
||||
/** Running count of emails received via this journaling endpoint */
|
||||
totalReceived: integer('total_received').notNull().default(0),
|
||||
/** Timestamp of the last email received */
|
||||
lastReceivedAt: timestamp('last_received_at', { withTimezone: true }),
|
||||
createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
|
||||
updatedAt: timestamp('updated_at', { withTimezone: true }).notNull().defaultNow(),
|
||||
});
|
||||
|
||||
export const journalingSourcesRelations = relations(journalingSources, ({ one }) => ({
|
||||
ingestionSource: one(ingestionSources, {
|
||||
fields: [journalingSources.ingestionSourceId],
|
||||
references: [ingestionSources.id],
|
||||
}),
|
||||
}));
|
||||
36
packages/backend/src/database/schema/sync-sessions.ts
Normal file
36
packages/backend/src/database/schema/sync-sessions.ts
Normal file
@@ -0,0 +1,36 @@
|
||||
import { boolean, integer, pgTable, text, timestamp, uuid } from 'drizzle-orm/pg-core';
|
||||
import { ingestionSources } from './ingestion-sources';
|
||||
import { relations } from 'drizzle-orm';
|
||||
|
||||
/**
|
||||
* Tracks the progress of a single sync cycle (initial import or continuous sync).
|
||||
* Used as the coordination layer to replace BullMQ FlowProducer parent/child tracking.
|
||||
* Each process-mailbox job atomically increments completed/failed counters here,
|
||||
* and the last job to finish dispatches the sync-cycle-finished job.
|
||||
*/
|
||||
export const syncSessions = pgTable('sync_sessions', {
|
||||
id: uuid('id').primaryKey().defaultRandom(),
|
||||
ingestionSourceId: uuid('ingestion_source_id')
|
||||
.notNull()
|
||||
.references(() => ingestionSources.id, { onDelete: 'cascade' }),
|
||||
isInitialImport: boolean('is_initial_import').notNull().default(false),
|
||||
totalMailboxes: integer('total_mailboxes').notNull().default(0),
|
||||
completedMailboxes: integer('completed_mailboxes').notNull().default(0),
|
||||
failedMailboxes: integer('failed_mailboxes').notNull().default(0),
|
||||
/** Aggregated error messages from all failed process-mailbox jobs */
|
||||
errorMessages: text('error_messages').array().notNull().default([]),
|
||||
createdAt: timestamp('created_at', { withTimezone: true }).notNull().defaultNow(),
|
||||
/**
|
||||
* Updated each time a process-mailbox job reports its result.
|
||||
* Used to detect genuinely stuck sessions (no activity for N minutes) vs.
|
||||
* large imports that are still actively running.
|
||||
*/
|
||||
lastActivityAt: timestamp('last_activity_at', { withTimezone: true }).notNull().defaultNow(),
|
||||
});
|
||||
|
||||
export const syncSessionsRelations = relations(syncSessions, ({ one }) => ({
|
||||
ingestionSource: one(ingestionSources, {
|
||||
fields: [syncSessions.ingestionSourceId],
|
||||
references: [ingestionSources.id],
|
||||
}),
|
||||
}));
|
||||
@@ -1,7 +1,16 @@
|
||||
import { config } from '../config';
|
||||
import i18next from 'i18next';
|
||||
|
||||
export function checkDeletionEnabled() {
|
||||
interface DeletionOptions {
|
||||
allowSystemDelete?: boolean;
|
||||
}
|
||||
|
||||
export function checkDeletionEnabled(options?: DeletionOptions) {
|
||||
// If system delete is allowed (e.g. by retention policy), bypass the config check
|
||||
if (options?.allowSystemDelete) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!config.app.enableDeletion) {
|
||||
const errorMessage = i18next.t('Deletion is disabled for this instance.');
|
||||
throw new Error(errorMessage);
|
||||
|
||||
218
packages/backend/src/helpers/emlUtils.ts
Normal file
218
packages/backend/src/helpers/emlUtils.ts
Normal file
@@ -0,0 +1,218 @@
|
||||
import { simpleParser, type Attachment } from 'mailparser';
|
||||
import MailComposer from 'nodemailer/lib/mail-composer';
|
||||
import type Mail from 'nodemailer/lib/mailer';
|
||||
import { logger } from '../config/logger';
|
||||
|
||||
/**
|
||||
* Set of headers that are either handled natively by nodemailer's MailComposer
|
||||
* via dedicated options, or are structural MIME headers that will be regenerated
|
||||
* when the MIME tree is rebuilt.
|
||||
*/
|
||||
const HEADERS_HANDLED_BY_COMPOSER = new Set([
|
||||
'content-type',
|
||||
'content-transfer-encoding',
|
||||
'mime-version',
|
||||
'from',
|
||||
'to',
|
||||
'cc',
|
||||
'bcc',
|
||||
'subject',
|
||||
'message-id',
|
||||
'date',
|
||||
'in-reply-to',
|
||||
'references',
|
||||
'reply-to',
|
||||
'sender',
|
||||
]);
|
||||
|
||||
/**
|
||||
* Determines whether a parsed attachment should be preserved in the stored .eml.
|
||||
*
|
||||
* An attachment is considered inline if:
|
||||
* 1. mailparser explicitly marked it as related (embedded in multipart/related)
|
||||
* 2. It has Content-Disposition: inline AND a Content-ID
|
||||
* 3. Its Content-ID is referenced as a cid: URL in the HTML body
|
||||
*
|
||||
* All three checks are evaluated with OR logic (conservative: keep if any match).
|
||||
*/
|
||||
function isInlineAttachment(attachment: Attachment, referencedCids: Set<string>): boolean {
|
||||
// Signal 1: mailparser marks embedded multipart/related resources
|
||||
if (attachment.related === true) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (attachment.cid) {
|
||||
const normalizedCid = attachment.cid.toLowerCase();
|
||||
|
||||
// Signal 2: explicitly marked inline with a CID
|
||||
if (attachment.contentDisposition === 'inline') {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Signal 3: CID is actively referenced in the HTML body
|
||||
if (referencedCids.has(normalizedCid)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extracts cid: references from an HTML string.
|
||||
* Matches patterns like src="cid:abc123" in img tags or CSS backgrounds.
|
||||
*
|
||||
* @returns A Set of normalized (lowercased) CID values without the "cid:" prefix.
|
||||
*/
|
||||
function extractCidReferences(html: string): Set<string> {
|
||||
const cidPattern = /\bcid:([^\s"'>]+)/gi;
|
||||
const cids = new Set<string>();
|
||||
let match: RegExpExecArray | null;
|
||||
while ((match = cidPattern.exec(html)) !== null) {
|
||||
cids.add(match[1].toLowerCase());
|
||||
}
|
||||
return cids;
|
||||
}
|
||||
|
||||
/**
|
||||
* Extracts additional headers from the parsed email's header map that are NOT
|
||||
* handled natively by nodemailer's MailComposer dedicated options.
|
||||
* These are passed through as custom headers to preserve the original email metadata.
|
||||
*/
|
||||
function extractAdditionalHeaders(
|
||||
headers: Map<string, unknown>
|
||||
): Array<{ key: string; value: string }> {
|
||||
const result: Array<{ key: string; value: string }> = [];
|
||||
|
||||
for (const [key, value] of headers) {
|
||||
if (HEADERS_HANDLED_BY_COMPOSER.has(key.toLowerCase())) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (typeof value === 'string') {
|
||||
result.push({ key, value });
|
||||
} else if (Array.isArray(value)) {
|
||||
// Headers like 'received' can appear multiple times
|
||||
for (const item of value) {
|
||||
if (typeof item === 'string') {
|
||||
result.push({ key, value: item });
|
||||
} else if (item && typeof item === 'object' && 'value' in item) {
|
||||
result.push({ key, value: String(item.value) });
|
||||
}
|
||||
}
|
||||
} else if (value && typeof value === 'object' && 'value' in value) {
|
||||
// Structured headers like { value: '...', params: {...} }
|
||||
result.push({ key, value: String((value as { value: string }).value) });
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts a mailparser AddressObject or AddressObject[] to a comma-separated string
|
||||
* suitable for nodemailer's MailComposer options.
|
||||
*/
|
||||
function addressToString(
|
||||
addresses: import('mailparser').AddressObject | import('mailparser').AddressObject[] | undefined
|
||||
): string | undefined {
|
||||
if (!addresses) return undefined;
|
||||
const arr = Array.isArray(addresses) ? addresses : [addresses];
|
||||
return arr.map((a) => a.text).join(', ') || undefined;
|
||||
}
|
||||
|
||||
/**
|
||||
* Strips non-inline attachments from a raw .eml buffer to avoid double-storing
|
||||
* attachment data (since attachments are already stored separately).
|
||||
*
|
||||
* Inline images referenced via cid: in the HTML body are preserved so that
|
||||
* the email renders correctly when viewed.
|
||||
*
|
||||
* If the email has no strippable attachments, the original buffer is returned
|
||||
* unchanged (zero overhead).
|
||||
*
|
||||
* If re-serialization fails for any reason, the original buffer is returned
|
||||
* and a warning is logged — email ingestion is never blocked by this function.
|
||||
*
|
||||
* @param emlBuffer The raw .eml file as a Buffer.
|
||||
* @returns A new Buffer with non-inline attachments removed, or the original if nothing was stripped.
|
||||
*/
|
||||
export async function stripAttachmentsFromEml(emlBuffer: Buffer): Promise<Buffer> {
|
||||
try {
|
||||
const parsed = await simpleParser(emlBuffer);
|
||||
|
||||
// If there are no attachments at all, return early
|
||||
if (!parsed.attachments || parsed.attachments.length === 0) {
|
||||
return emlBuffer;
|
||||
}
|
||||
|
||||
// Build the set of cid values referenced in the HTML body
|
||||
const htmlBody = parsed.html || '';
|
||||
const referencedCids = extractCidReferences(htmlBody);
|
||||
|
||||
// Check if there's anything to strip
|
||||
const hasStrippableAttachments = parsed.attachments.some(
|
||||
(a) => !isInlineAttachment(a, referencedCids)
|
||||
);
|
||||
|
||||
if (!hasStrippableAttachments) {
|
||||
return emlBuffer;
|
||||
}
|
||||
|
||||
// Build the list of inline attachments to preserve in the .eml
|
||||
const inlineAttachments: Mail.Attachment[] = [];
|
||||
for (const attachment of parsed.attachments) {
|
||||
if (isInlineAttachment(attachment, referencedCids)) {
|
||||
inlineAttachments.push({
|
||||
content: attachment.content,
|
||||
contentType: attachment.contentType,
|
||||
contentDisposition: 'inline' as const,
|
||||
filename: attachment.filename || undefined,
|
||||
cid: attachment.cid || undefined,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// Collect additional headers not handled by MailComposer's dedicated fields
|
||||
const additionalHeaders = extractAdditionalHeaders(parsed.headers);
|
||||
|
||||
// Build the mail options for MailComposer
|
||||
const mailOptions: Mail.Options = {
|
||||
from: addressToString(parsed.from),
|
||||
to: addressToString(parsed.to),
|
||||
cc: addressToString(parsed.cc),
|
||||
bcc: addressToString(parsed.bcc),
|
||||
replyTo: addressToString(parsed.replyTo),
|
||||
subject: parsed.subject,
|
||||
messageId: parsed.messageId,
|
||||
date: parsed.date,
|
||||
inReplyTo: parsed.inReplyTo,
|
||||
references: Array.isArray(parsed.references)
|
||||
? parsed.references.join(' ')
|
||||
: parsed.references,
|
||||
text: parsed.text || undefined,
|
||||
html: parsed.html || undefined,
|
||||
attachments: inlineAttachments,
|
||||
headers: additionalHeaders,
|
||||
};
|
||||
|
||||
const composer = new MailComposer(mailOptions);
|
||||
const builtMessage = composer.compile();
|
||||
const stream = builtMessage.createReadStream();
|
||||
|
||||
return await new Promise<Buffer>((resolve, reject) => {
|
||||
const chunks: Buffer[] = [];
|
||||
stream.on('data', (chunk: Buffer) => chunks.push(chunk));
|
||||
stream.on('end', () => resolve(Buffer.concat(chunks)));
|
||||
stream.on('error', reject);
|
||||
});
|
||||
} catch (error) {
|
||||
// If stripping fails, return the original buffer unchanged.
|
||||
// Email ingestion should never be blocked by an attachment-stripping failure.
|
||||
logger.warn(
|
||||
{ error },
|
||||
'Failed to strip non-inline attachments from .eml — storing original.'
|
||||
);
|
||||
return emlBuffer;
|
||||
}
|
||||
}
|
||||
36
packages/backend/src/hooks/RetentionHook.ts
Normal file
36
packages/backend/src/hooks/RetentionHook.ts
Normal file
@@ -0,0 +1,36 @@
|
||||
import { logger } from '../config/logger';
|
||||
|
||||
export type DeletionCheck = (emailId: string) => Promise<boolean>;
|
||||
|
||||
export class RetentionHook {
|
||||
private static checks: DeletionCheck[] = [];
|
||||
|
||||
/**
|
||||
* Registers a function that checks if an email can be deleted.
|
||||
* The function should return true if deletion is allowed, false otherwise.
|
||||
*/
|
||||
static registerCheck(check: DeletionCheck) {
|
||||
this.checks.push(check);
|
||||
}
|
||||
|
||||
/**
|
||||
* Verifies if an email can be deleted by running all registered checks.
|
||||
* If ANY check returns false, deletion is blocked.
|
||||
*/
|
||||
static async canDelete(emailId: string): Promise<boolean> {
|
||||
for (const check of this.checks) {
|
||||
try {
|
||||
const allowed = await check(emailId);
|
||||
if (!allowed) {
|
||||
logger.info(`Deletion blocked by retention check for email ${emailId}`);
|
||||
return false;
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error(`Error in retention check for email ${emailId}:`, error);
|
||||
// Fail safe: if a check errors, assume we CANNOT delete to be safe
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@@ -6,5 +6,9 @@ export * from './services/AuditService';
|
||||
export * from './api/middleware/requireAuth';
|
||||
export * from './api/middleware/requirePermission';
|
||||
export { db } from './database';
|
||||
export * as drizzleOrm from 'drizzle-orm';
|
||||
export * from './database/schema';
|
||||
export { AuditService } from './services/AuditService';
|
||||
export * from './config';
|
||||
export * from './jobs/queues';
|
||||
export { RetentionHook } from './hooks/RetentionHook';
|
||||
export { IntegrityService } from './services/IntegrityService';
|
||||
|
||||
@@ -2,7 +2,8 @@ import { Job } from 'bullmq';
|
||||
import { IngestionService } from '../../services/IngestionService';
|
||||
import { IContinuousSyncJob } from '@open-archiver/types';
|
||||
import { EmailProviderFactory } from '../../services/EmailProviderFactory';
|
||||
import { flowProducer } from '../queues';
|
||||
import { ingestionQueue } from '../queues';
|
||||
import { SyncSessionService } from '../../services/SyncSessionService';
|
||||
import { logger } from '../../config/logger';
|
||||
|
||||
export default async (job: Job<IContinuousSyncJob>) => {
|
||||
@@ -26,50 +27,54 @@ export default async (job: Job<IContinuousSyncJob>) => {
|
||||
const connector = EmailProviderFactory.createConnector(source);
|
||||
|
||||
try {
|
||||
const jobs = [];
|
||||
// Phase 1: Collect user emails (async generator — no full buffering of job descriptors).
|
||||
// We need the total count before creating the session so the counter is correct.
|
||||
const userEmails: string[] = [];
|
||||
for await (const user of connector.listAllUsers()) {
|
||||
if (user.primaryEmail) {
|
||||
jobs.push({
|
||||
name: 'process-mailbox',
|
||||
queueName: 'ingestion',
|
||||
data: {
|
||||
ingestionSourceId: source.id,
|
||||
userEmail: user.primaryEmail,
|
||||
},
|
||||
opts: {
|
||||
removeOnComplete: {
|
||||
age: 60 * 10, // 10 minutes
|
||||
},
|
||||
removeOnFail: {
|
||||
age: 60 * 30, // 30 minutes
|
||||
},
|
||||
timeout: 1000 * 60 * 30, // 30 minutes
|
||||
},
|
||||
});
|
||||
userEmails.push(user.primaryEmail);
|
||||
}
|
||||
}
|
||||
// }
|
||||
|
||||
if (jobs.length > 0) {
|
||||
await flowProducer.add({
|
||||
name: 'sync-cycle-finished',
|
||||
queueName: 'ingestion',
|
||||
data: {
|
||||
ingestionSourceId,
|
||||
isInitialImport: false,
|
||||
},
|
||||
children: jobs,
|
||||
opts: {
|
||||
removeOnComplete: true,
|
||||
removeOnFail: true,
|
||||
},
|
||||
if (userEmails.length === 0) {
|
||||
logger.info(
|
||||
{ ingestionSourceId },
|
||||
'No users found during continuous sync, marking active.'
|
||||
);
|
||||
await IngestionService.update(ingestionSourceId, {
|
||||
status: 'active',
|
||||
lastSyncFinishedAt: new Date(),
|
||||
lastSyncStatusMessage: 'Continuous sync complete. No users found.',
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Phase 2: Create a session BEFORE dispatching any jobs.
|
||||
const sessionId = await SyncSessionService.create(
|
||||
ingestionSourceId,
|
||||
userEmails.length,
|
||||
false
|
||||
);
|
||||
|
||||
logger.info(
|
||||
{ ingestionSourceId, userCount: userEmails.length, sessionId },
|
||||
'Dispatching process-mailbox jobs for continuous sync'
|
||||
);
|
||||
|
||||
// Phase 3: Enqueue individual process-mailbox jobs one at a time.
|
||||
// No FlowProducer — each job carries the sessionId for DB-based coordination.
|
||||
for (const userEmail of userEmails) {
|
||||
await ingestionQueue.add('process-mailbox', {
|
||||
ingestionSourceId: source.id,
|
||||
userEmail,
|
||||
sessionId,
|
||||
});
|
||||
}
|
||||
|
||||
// The status will be set back to 'active' by the 'sync-cycle-finished' job
|
||||
// once all the mailboxes have been processed.
|
||||
logger.info(
|
||||
{ ingestionSourceId },
|
||||
{ ingestionSourceId, sessionId },
|
||||
'Continuous sync job finished dispatching mailbox jobs.'
|
||||
);
|
||||
} catch (error) {
|
||||
|
||||
@@ -4,6 +4,7 @@ import { SearchService } from '../../services/SearchService';
|
||||
import { StorageService } from '../../services/StorageService';
|
||||
import { DatabaseService } from '../../services/DatabaseService';
|
||||
import { PendingEmail } from '@open-archiver/types';
|
||||
import { logger } from '@open-archiver/backend/config/logger';
|
||||
|
||||
const searchService = new SearchService();
|
||||
const storageService = new StorageService();
|
||||
@@ -12,6 +13,6 @@ const indexingService = new IndexingService(databaseService, searchService, stor
|
||||
|
||||
export default async function (job: Job<{ emails: PendingEmail[] }>) {
|
||||
const { emails } = job.data;
|
||||
console.log(`Indexing email batch with ${emails.length} emails`);
|
||||
logger.info(`Indexing email batch with ${emails.length} emails`);
|
||||
await indexingService.indexEmailBatch(emails);
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user