Mirror of https://github.com/LogicLabs-OU/OpenArchiver.git (synced 2026-04-06 08:41:57 +02:00)

Compare commits: role-based...v0.4.3-dev (44 commits)
Commit SHA1s (author, date, and message columns were not captured in this view):

1795b76004, 531eabb96e, 9b303c963e, a0f8cd5d05, 9228f64221, 481a5ce6f9, 3434e8d6ef,
7dac3b2bfd, cf121989ae, 2df5c9240d, 24afd13858, c2006dfa94, 399059a773, 0cff788656,
ddb4d56107, 42b0f6e5f1, 6e1ebbbfd7, 1e048fdbc1, b71dd55e25, d372ef7566, e9a65f9672,
ce3f379b7a, 37a778cb6d, 26a760b232, 6be0774bc4, 4a23f8f29f, 074256ed59, 7d178d786b,
4b11cd931a, 0a21ad14cd, 63d3960f79, 85a526d1b6, 52a1a11973, 4048f47777, 22b173cbe4,
774b0d7a6b, 85607d2ab3, 94021eab69, faefdac44a, 392f51dabc, baff1195c7, f1da17e484,
a2c55f36ee, 9fdba4cd61
.env.example (40 lines changed)

@@ -4,8 +4,15 @@
NODE_ENV=development
PORT_BACKEND=4000
PORT_FRONTEND=3000
# The public-facing URL of your application. This is used by the backend to configure CORS.
APP_URL=http://localhost:3000
# This is used by the SvelteKit Node adapter to determine the server's public-facing URL.
# It should always be set to the value of APP_URL.
ORIGIN=$APP_URL
# The frequency of continuous email syncing. Default is every minute, but you can change it to another value based on your needs.
SYNC_FREQUENCY='* * * * *'
# Set to 'true' to include Junk and Trash folders in the email archive. Defaults to false.
ALL_INCLUSIVE_ARCHIVE=false

# --- Docker Compose Service Configuration ---
# These variables are used by docker-compose.yml to configure the services. Leave them unchanged if you use Docker services for PostgreSQL, Valkey (Redis), and Meilisearch. If you decide to use your own instances of these services, you can substitute them with your own connection credentials.

@@ -19,7 +26,8 @@ DATABASE_URL="postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres:5432/$
# Meilisearch
MEILI_MASTER_KEY=aSampleMasterKey
MEILI_HOST=http://meilisearch:7700

# The number of emails to batch together for indexing. Defaults to 500.
MEILI_INDEXING_BATCH=500

# Redis (We use Valkey, which is Redis-compatible and open source)

@@ -28,6 +36,8 @@ REDIS_PORT=6379
REDIS_PASSWORD=defaultredispassword
# If you run the Valkey service from Docker Compose, set the REDIS_TLS_ENABLED variable to false.
REDIS_TLS_ENABLED=false
# Redis username. Only required if not using the default user.
REDIS_USER=notdefaultuser

# --- Storage Settings ---

@@ -39,7 +49,9 @@ BODY_SIZE_LIMIT=100M
# --- Local Storage Settings ---
# The path inside the container where files will be stored.
# This is mapped to a Docker volume for persistence.
# This is only used if STORAGE_TYPE is 'local'.
# This is not an optional variable; it is where the Open Archiver service stores application data. Set this even if you are using S3 storage.
# Make sure the user that runs the Open Archiver service has read and write access to this path.
# Important: It is recommended to create this path manually before installation; otherwise you may face permission and ownership problems.
STORAGE_LOCAL_ROOT_PATH=/var/data/open-archiver

# --- S3-Compatible Storage Settings ---

@@ -52,19 +64,37 @@ STORAGE_S3_REGION=
# Set to 'true' for MinIO and other non-AWS S3 services
STORAGE_S3_FORCE_PATH_STYLE=false

# --- Storage Encryption ---
# IMPORTANT: Generate a secure, random 32-byte hex string for this key.
# You can use `openssl rand -hex 32` to generate a key.
# This key is used for AES-256 encryption of files at rest.
# This is an optional variable; if not set, files will not be encrypted.
STORAGE_ENCRYPTION_KEY=

# --- Security & Authentication ---

# Enable or disable deletion of emails and ingestion sources. Defaults to false.
ENABLE_DELETION=false

# Rate Limiting
# The window in milliseconds for which API requests are checked. Defaults to 60000 (1 minute).
RATE_LIMIT_WINDOW_MS=60000
# The maximum number of API requests allowed from an IP within the window. Defaults to 100.
RATE_LIMIT_MAX_REQUESTS=100

# JWT
# IMPORTANT: Change this to a long, random, and secret string in your .env file
JWT_SECRET=a-very-secret-key-that-you-should-change
JWT_EXPIRES_IN="7d"

# Set the credentials for the initial admin user.
SUPER_API_KEY=

# Master Encryption Key for sensitive data (such as ingestion source credentials and passwords)
# IMPORTANT: Generate a secure, random 32-byte hex string for this key.
# You can use `openssl rand -hex 32` to generate a key.
ENCRYPTION_KEY=

# Apache Tika Integration
# ONLY active if TIKA_URL is set
TIKA_URL=http://tika:9998
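Both `STORAGE_ENCRYPTION_KEY` and `ENCRYPTION_KEY` above expect a 32-byte hex string. If `openssl` is unavailable, a minimal sketch that produces an equivalent key on the Node runtime the project already depends on (nothing here is project-specific):

```typescript
// generate-key.ts — prints a 32-byte key as a 64-character hex string,
// equivalent to `openssl rand -hex 32`.
import { randomBytes } from 'node:crypto';

console.log(randomBytes(32).toString('hex'));
```

Run it with `npx tsx generate-key.ts` (or compile with `tsc`) and paste the output into the two key variables.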
.github/FUNDING.yml (new file, 1 line)

@@ -0,0 +1 @@
github: [wayneshn]
.github/ISSUE_TEMPLATE/bug_report.md (new file, 33 lines)

@@ -0,0 +1,33 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: bug
assignees: ''
---

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:

1. Go to '...'
2. Click on '....'
3. See error

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**System:**

- Open Archiver Version:

**Relevant logs:**
Any relevant logs (Redact sensitive information)

**Additional context**
Add any other context about the problem here.
.github/ISSUE_TEMPLATE/feature_request.md (new file, 19 lines)

@@ -0,0 +1,19 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''
---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is.

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.
.github/workflows/docker-deployment.yml (2 lines changed)

@@ -35,7 +35,7 @@ jobs:
             uses: docker/build-push-action@v6
             with:
                 context: .
-                file: ./docker/Dockerfile
+                file: ./apps/open-archiver/Dockerfile
                 platforms: linux/amd64,linux/arm64
                 push: true
                 tags: logiclabshq/open-archiver:${{ steps.sha.outputs.sha }}
.gitignore (4 lines changed)

@@ -24,3 +24,7 @@ pnpm-debug.log
 # Vitepress
 docs/.vitepress/dist
 docs/.vitepress/cache
+
+# TS
+**/tsconfig.tsbuildinfo
LICENSE (140 lines changed)

@@ -200,23 +200,23 @@ You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:

- **a)** The work must carry prominent notices stating that you modified it, and giving a relevant date.
- **b)** The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”.
- **c)** You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it.
- **d)** If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so.

A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,

@@ -235,42 +235,42 @@ of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:

- **a)** Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange.
- **b)** Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either **(1)** a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or **(2)** access to copy the Corresponding Source from a network server at no charge.
- **c)** Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b.
- **d)** Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements.
- **e)** Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d.

A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be

@@ -344,23 +344,23 @@ Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:

- **a)** Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or
- **b)** Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or
- **c)** Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or
- **d)** Limiting the use for publicity purposes of names of licensors or authors of the material; or
- **e)** Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or
- **f)** Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors.

All other non-permissive additional terms are considered “further
restrictions” within the meaning of section 10. If the Program as you
README.md (29 lines changed)

@@ -7,11 +7,11 @@
 [Redis badge](https://redis.io)
 [Svelte badge](https://svelte.dev/)

-**A secure, sovereign, and open-source platform for email archiving and eDiscovery.**
+**A secure, sovereign, and open-source platform for email archiving.**

 Open Archiver provides a robust, self-hosted solution for archiving, storing, indexing, and searching emails from major platforms, including Google Workspace (Gmail), Microsoft 365, PST files, as well as generic IMAP-enabled email inboxes. Use Open Archiver to keep a permanent, tamper-proof record of your communication history, free from vendor lock-in.

-## 📸 Screenshots
+## Screenshots

 ![Dashboard]()
 _Dashboard_

@@ -22,9 +22,9 @@ _Archived emails_
 ![Search]()
 _Full-text search across all your emails and attachments_

-## 👨‍👩‍👧‍👦 Join our community!
+## Join our community!

-We are committed to build an engaging community around Open Archiver, and we are inviting all of you to join our community on Discord to get real-time support and connect with the team.
+We are committed to building an engaging community around Open Archiver, and we are inviting all of you to join our community on Discord to get real-time support and connect with the team.

 [Discord badge](https://discord.gg/MTtD7BhuTQ)

@@ -34,11 +34,11 @@ We are committed to build an engaging community around Open Archiver, and we are

 Check out the live demo here: https://demo.openarchiver.com

-Username: admin@local.com
+Username: demo@openarchiver.com

 Password: openarchiver_demo

-## ✨ Key Features
+## Key Features

 - **Universal Ingestion**: Connect to any email provider to perform initial bulk imports and maintain continuous, real-time synchronization. Ingestion sources include:
     - IMAP connection

@@ -46,15 +46,18 @@ Password: openarchiver_demo
     - Microsoft 365
     - PST files
     - Zipped .eml files
+    - Mbox files

-- **Secure & Efficient Storage**: Emails are stored in the standard `.eml` format. The system uses deduplication and compression to minimize storage costs. All data is encrypted at rest.
+- **Secure & Efficient Storage**: Emails are stored in the standard `.eml` format. The system uses deduplication and compression to minimize storage costs. All files are encrypted at rest.
 - **Pluggable Storage Backends**: Supports both local filesystem storage and S3-compatible object storage (like AWS S3 or MinIO).
 - **Powerful Search & eDiscovery**: A high-performance search engine indexes the full text of emails and attachments (PDF, DOCX, etc.).
 - **Thread discovery**: The ability to discover if an email belongs to a thread/conversation and present the context.
-- **Compliance & Retention**: Define granular retention policies to automatically manage the lifecycle of your data. Place legal holds on communications to prevent deletion during litigation (TBD).
-- **Comprehensive Auditing**: An immutable audit trail logs all system activities, ensuring you have a clear record of who accessed what and when (TBD).
+- **File Hash and Encryption**: Email and attachment file hash values are stored in the meta database upon ingestion, meaning any attempt to alter the file content will be identified, ensuring legal and regulatory compliance.
+    - Each archived email comes with an "Integrity Report" feature that indicates if the files are original.
+- **Comprehensive Auditing**: An immutable audit trail logs all system activities, ensuring you have a clear record of who accessed what and when.

-## 🛠️ Tech Stack
+## Tech Stack

 Open Archiver is built on a modern, scalable, and maintainable technology stack:

@@ -65,7 +68,7 @@ Open Archiver is built on a modern, scalable, and maintainable technology stack:
 - **Database**: PostgreSQL for metadata, user management, and audit logs
 - **Deployment**: Docker Compose deployment

-## 📦 Deployment
+## Deployment

 ### Prerequisites

@@ -78,7 +81,7 @@

 ```bash
 git clone https://github.com/LogicLabs-OU/OpenArchiver.git
-cd open-archiver
+cd OpenArchiver
 ```

 2. **Configure your environment:**

@@ -101,7 +104,7 @@
 4. **Access the application:**
    Once the services are running, you can access the Open Archiver web interface by navigating to `http://localhost:3000` in your web browser.

-## ⚙️ Data Source Configuration
+## Data Source Configuration

 After deploying the application, you will need to configure one or more ingestion sources to begin archiving emails. Follow our detailed guides to connect to your email provider:

@@ -109,7 +112,7 @@ After deploying the application, you will need to configure one or more ingestio
 - [Connecting to Microsoft 365](https://docs.openarchiver.com/user-guides/email-providers/imap.html)
 - [Connecting to a Generic IMAP Server](https://docs.openarchiver.com/user-guides/email-providers/imap.html)

-## 🤝 Contributing
+## Contributing

 We welcome contributions from the community!
Dockerfile

@@ -1,4 +1,4 @@
-# Dockerfile for Open Archiver
+# Dockerfile for the OSS version of Open Archiver

 ARG BASE_IMAGE=node:22-alpine

@@ -15,12 +15,13 @@ COPY package.json pnpm-workspace.yaml pnpm-lock.yaml* ./
 COPY packages/backend/package.json ./packages/backend/
 COPY packages/frontend/package.json ./packages/frontend/
 COPY packages/types/package.json ./packages/types/
+COPY apps/open-archiver/package.json ./apps/open-archiver/

 # 1. Build Stage: Install all dependencies and build the project
 FROM base AS build
 COPY packages/frontend/svelte.config.js ./packages/frontend/

-# Install all dependencies. Use --shamefully-hoist to create a flat node_modules structure
+# Install all dependencies.
 ENV PNPM_HOME="/pnpm"
 RUN --mount=type=cache,id=pnpm,target=/pnpm/store \
     pnpm install --shamefully-hoist --frozen-lockfile --prod=false

@@ -28,19 +29,19 @@ RUN --mount=type=cache,id=pnpm,target=/pnpm/store \
 # Copy the rest of the source code
 COPY . .

-# Build all packages.
-RUN pnpm build
+# Build the OSS packages.
+RUN pnpm build:oss

 # 2. Production Stage: Install only production dependencies and copy built artifacts
 FROM base AS production

 # Copy built application from build stage
 COPY --from=build /app/packages/backend/dist ./packages/backend/dist
-COPY --from=build /app/packages/frontend/build ./packages/frontend/build
-COPY --from=build /app/packages/types/dist ./packages/types/dist
 COPY --from=build /app/packages/backend/drizzle.config.ts ./packages/backend/drizzle.config.ts
 COPY --from=build /app/packages/backend/src/database/migrations ./packages/backend/src/database/migrations
+COPY --from=build /app/packages/frontend/build ./packages/frontend/build
+COPY --from=build /app/packages/types/dist ./packages/types/dist
+COPY --from=build /app/apps/open-archiver/dist ./apps/open-archiver/dist

 # Copy the entrypoint script and make it executable
 COPY docker/docker-entrypoint.sh /usr/local/bin/

@@ -53,4 +54,4 @@ EXPOSE 3000
 ENTRYPOINT ["docker-entrypoint.sh"]

 # Start the application
-CMD ["pnpm", "docker-start"]
+CMD ["pnpm", "docker-start:oss"]
apps/open-archiver/index.ts (new file, 24 lines)

@@ -0,0 +1,24 @@
import { createServer, logger } from '@open-archiver/backend';
import * as dotenv from 'dotenv';

dotenv.config();

async function start() {
    // --- Environment Variable Validation ---
    const { PORT_BACKEND } = process.env;

    if (!PORT_BACKEND) {
        throw new Error('Missing required environment variables for the backend: PORT_BACKEND.');
    }
    // Create the server instance (passing no modules for the default OSS version)
    const app = await createServer([]);

    app.listen(PORT_BACKEND, () => {
        logger.info({}, `✅ Open Archiver (OSS) running on port ${PORT_BACKEND}`);
    });
}

start().catch((error) => {
    logger.error({ error }, 'Failed to start the server:', error);
    process.exit(1);
});
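The empty array passed to `createServer` is the module list; an enterprise build would presumably register its feature modules here. A purely hypothetical sketch — the import path and module value are assumptions for illustration, while only `createServer`'s array parameter and the `AuditLogModule` name appear elsewhere in this changeset:

```typescript
// Hypothetical enterprise entrypoint — import path and module shape are
// assumed, not taken from this diff.
import { createServer, logger } from '@open-archiver/backend';
import { AuditLogModule } from '@open-archiver/enterprise';

const app = await createServer([AuditLogModule]);
app.listen(process.env.PORT_BACKEND ?? 4000, () => {
    logger.info({}, 'Open Archiver (enterprise) started');
});
```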
apps/open-archiver/package.json (new file, 18 lines)

@@ -0,0 +1,18 @@
{
    "name": "open-archiver-app",
    "version": "1.0.0",
    "private": true,
    "scripts": {
        "dev": "ts-node-dev --respawn --transpile-only index.ts",
        "build": "tsc",
        "start": "node dist/index.js"
    },
    "dependencies": {
        "@open-archiver/backend": "workspace:*",
        "dotenv": "^17.2.0"
    },
    "devDependencies": {
        "@types/dotenv": "^8.2.3",
        "ts-node-dev": "^2.0.0"
    }
}
apps/open-archiver/tsconfig.json (new file, 8 lines)

@@ -0,0 +1,8 @@
{
    "extends": "../../tsconfig.base.json",
    "compilerOptions": {
        "outDir": "dist"
    },
    "include": ["./**/*.ts"],
    "references": [{ "path": "../../packages/backend" }]
}
assets/screenshots/integrity-report.png (new binary file, 304 KiB; not shown)
assets/screenshots/job-queue.png (new binary file, 259 KiB; not shown)
docker-compose.yml

@@ -6,12 +6,11 @@ services:
         container_name: open-archiver
         restart: unless-stopped
         ports:
             - '4000:4000' # Backend
             - '3000:3000' # Frontend
         env_file:
             - .env
         volumes:
-            - archiver-data:/var/data/open-archiver
+            - ${STORAGE_LOCAL_ROOT_PATH}:${STORAGE_LOCAL_ROOT_PATH}
         depends_on:
             - postgres
             - valkey

@@ -29,8 +28,6 @@ services:
             POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-password}
         volumes:
             - pgdata:/var/lib/postgresql/data
-        ports:
-            - '5432:5432'
         networks:
             - open-archiver-net

@@ -39,8 +36,6 @@ services:
         container_name: valkey
         restart: unless-stopped
         command: valkey-server --requirepass ${REDIS_PASSWORD}
-        ports:
-            - '6379:6379'
         volumes:
             - valkeydata:/data
         networks:

@@ -52,13 +47,19 @@ services:
         restart: unless-stopped
         environment:
             MEILI_MASTER_KEY: ${MEILI_MASTER_KEY:-aSampleMasterKey}
-        ports:
-            - '7700:7700'
+            MEILI_SCHEDULE_SNAPSHOT: ${MEILI_SCHEDULE_SNAPSHOT:-86400}
         volumes:
             - meilidata:/meili_data
         networks:
             - open-archiver-net

+    tika:
+        image: apache/tika:3.2.2.0-full
+        container_name: tika
+        restart: always
+        networks:
+            - open-archiver-net

 volumes:
     pgdata:
         driver: local

@@ -66,8 +67,6 @@ volumes:
         driver: local
     meilidata:
         driver: local
-    archiver-data:
-        driver: local

 networks:
     open-archiver-net:
docs/.vitepress config (VitePress configuration)

@@ -6,12 +6,13 @@ export default defineConfig({
         'script',
         {
             defer: '',
-            src: 'https://analytics.zenceipt.com/script.js',
+            src: 'https://analytics.openarchiver.com/script.js',
             'data-website-id': '2c8b452e-eab5-4f82-8ead-902d8f8b976f',
         },
     ],
     ['link', { rel: 'icon', href: '/logo-sq.svg' }],
 ],
-title: 'Open Archiver',
+title: 'Open Archiver Docs',
 description: 'Official documentation for the Open Archiver project.',
 themeConfig: {
     search: {

@@ -32,6 +33,7 @@
 items: [
     { text: 'Get Started', link: '/' },
     { text: 'Installation', link: '/user-guides/installation' },
+    { text: 'Email Integrity Check', link: '/user-guides/integrity-check' },
     {
         text: 'Email Providers',
         link: '/user-guides/email-providers/',

@@ -51,6 +53,31 @@
     },
     { text: 'EML Import', link: '/user-guides/email-providers/eml' },
     { text: 'PST Import', link: '/user-guides/email-providers/pst' },
+    { text: 'Mbox Import', link: '/user-guides/email-providers/mbox' },
 ],
 },
+{
+    text: 'Settings',
+    collapsed: true,
+    items: [
+        {
+            text: 'System',
+            link: '/user-guides/settings/system',
+        },
+    ],
+},
+{
+    text: 'Upgrading and Migration',
+    collapsed: true,
+    items: [
+        {
+            text: 'Upgrading',
+            link: '/user-guides/upgrade-and-migration/upgrade',
+        },
+        {
+            text: 'Meilisearch Upgrade',
+            link: '/user-guides/upgrade-and-migration/meilisearch-upgrade',
+        },
+    ],
+},
 ],

@@ -60,12 +87,15 @@
 items: [
     { text: 'Overview', link: '/api/' },
     { text: 'Authentication', link: '/api/authentication' },
+    { text: 'Rate Limiting', link: '/api/rate-limiting' },
     { text: 'Auth', link: '/api/auth' },
     { text: 'Archived Email', link: '/api/archived-email' },
     { text: 'Dashboard', link: '/api/dashboard' },
     { text: 'Ingestion', link: '/api/ingestion' },
+    { text: 'Integrity Check', link: '/api/integrity' },
     { text: 'Search', link: '/api/search' },
     { text: 'Storage', link: '/api/storage' },
+    { text: 'Jobs', link: '/api/jobs' },
 ],
 },
 {

@@ -73,6 +103,11 @@
 items: [
     { text: 'Overview', link: '/services/' },
     { text: 'Storage Service', link: '/services/storage-service' },
+    { text: 'OCR Service', link: '/services/ocr-service' },
+    {
+        text: 'IAM Service',
+        items: [{ text: 'IAM Policies', link: '/services/iam-service/iam-policy' }],
+    },
 ],
 },
 ],
docs/api/authentication.md

@@ -1,60 +1,25 @@
 # API Authentication

-To access protected API endpoints, you need to include a JSON Web Token (JWT) in the `Authorization` header of your requests.
+To access protected API endpoints, you need to include a user-generated API key in the `X-API-KEY` header of your requests.

-## Obtaining a JWT
+## 1. Creating an API Key

-First, you need to authenticate with the `/api/v1/auth/login` endpoint by providing your email and password. If the credentials are correct, the API will return an `accessToken`.
+You can create, manage, and view your API keys through the application's user interface.

-**Request:**
+1. Navigate to **Settings > API Keys** in the dashboard.
+2. Click the **"Generate API Key"** button.
+3. Provide a descriptive name for your key and select an expiration period.
+4. The new API key will be displayed. **Copy this key immediately and store it in a secure location. You will not be able to see it again.**

-```http
-POST /api/v1/auth/login
-Content-Type: application/json
-
-{
-    "email": "user@example.com",
-    "password": "your-password"
-}
-```
+## 2. Making Authenticated Requests

-**Successful Response:**
-
-```json
-{
-    "accessToken": "your.jwt.token",
-    "user": {
-        "id": "user-id",
-        "email": "user@example.com",
-        "role": "user"
-    }
-}
-```
-
-## Making Authenticated Requests
-
-Once you have the `accessToken`, you must include it in the `Authorization` header of all subsequent requests to protected endpoints, using the `Bearer` scheme.
+Once you have your API key, you must include it in the `X-API-KEY` header of all subsequent requests to protected API endpoints.

 **Example:**

 ```http
 GET /api/v1/dashboard/stats
-Authorization: Bearer your.jwt.token
+X-API-KEY: a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2
 ```

-If the token is missing, expired, or invalid, the API will respond with a `401 Unauthorized` status code.
-
 ## Using a Super API Key

 Alternatively, for server-to-server communication or scripts, you can use a super API key. This key provides unrestricted access to the API and should be kept secret.

 You can set the `SUPER_API_KEY` in your `.env` file.

 To authenticate using the super API key, include it in the `Authorization` header as a Bearer token.

 **Example:**

 ```http
 GET /api/v1/dashboard/stats
 Authorization: Bearer your-super-secret-api-key
 ```
+
+If the API key is missing, expired, or invalid, the API will respond with a `401 Unauthorized` status code.
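For programmatic clients, the same scheme in TypeScript — a minimal sketch assuming the backend's default port from `.env.example` and an API key supplied via an environment variable:

```typescript
// Sketch: calling a protected endpoint with a user-generated API key.
const response = await fetch('http://localhost:4000/api/v1/dashboard/stats', {
    headers: { 'X-API-KEY': process.env.OPEN_ARCHIVER_API_KEY ?? '' },
});

if (response.status === 401) {
    throw new Error('API key missing, expired, or invalid.');
}
const stats = await response.json();
console.log(stats);
```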
docs/api/ingestion.md

@@ -19,11 +19,45 @@ The request body should be a `CreateIngestionSourceDto` object.

```typescript
interface CreateIngestionSourceDto {
    name: string;
    provider: 'google_workspace' | 'microsoft_365' | 'generic_imap' | 'pst_import' | 'eml_import' | 'mbox_import';
    providerConfig: IngestionCredentials;
}
```

#### Example: Creating an Mbox Import Source with File Upload

```json
{
    "name": "My Mbox Import",
    "provider": "mbox_import",
    "providerConfig": {
        "type": "mbox_import",
        "uploadedFileName": "emails.mbox",
        "uploadedFilePath": "open-archiver/tmp/uuid-emails.mbox"
    }
}
```

#### Example: Creating an Mbox Import Source with Local File Path

```json
{
    "name": "My Mbox Import",
    "provider": "mbox_import",
    "providerConfig": {
        "type": "mbox_import",
        "localFilePath": "/path/to/emails.mbox"
    }
}
```

**Note:** When using `localFilePath`, the file will not be deleted after import. When using `uploadedFilePath` (via the upload API), the file will be automatically deleted after import. The same applies to the `pst_import` and `eml_import` providers.

**Important regarding `localFilePath`:** When running OpenArchiver in a Docker container (which is the standard deployment), `localFilePath` refers to the path **inside the Docker container**, not on the host machine.
To use a local file:

1. **Recommended:** Place your file inside the directory defined by `STORAGE_LOCAL_ROOT_PATH` (e.g., inside a `temp` folder). Since this directory is already mounted as a volume, the file will be accessible at the same path inside the container.
2. **Alternative:** Mount a specific directory containing your files as a volume in `docker-compose.yml`. For example, add `- /path/to/my/files:/imports` to the `volumes` section and use `/imports/myfile.pst` as the `localFilePath`.

#### Responses

- **201 Created:** The newly created ingestion source.
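Putting the pieces together, a hedged sketch of creating an mbox source programmatically; the `/api/v1/ingestion-sources` path and the auth header are assumptions not shown in this excerpt, while the body mirrors the `CreateIngestionSourceDto` above:

```typescript
// Sketch: POSTing a new mbox ingestion source. The endpoint path is assumed.
const res = await fetch('http://localhost:4000/api/v1/ingestion-sources', {
    method: 'POST',
    headers: {
        'Content-Type': 'application/json',
        'X-API-KEY': process.env.OPEN_ARCHIVER_API_KEY ?? '',
    },
    body: JSON.stringify({
        name: 'My Mbox Import',
        provider: 'mbox_import',
        // Path inside the container, e.g. via a volume mount as described above.
        providerConfig: { type: 'mbox_import', localFilePath: '/imports/emails.mbox' },
    }),
});
if (res.status !== 201) throw new Error(`Create failed: HTTP ${res.status}`);
```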
docs/api/integrity.md (new file, 51 lines)

# Integrity Check API

The Integrity Check API provides an endpoint to verify the cryptographic hash of an archived email and its attachments against the stored values in the database. This allows you to ensure that the stored files have not been tampered with or corrupted since they were archived.

## Check Email Integrity

Verifies the integrity of a specific archived email and all of its associated attachments.

- **URL:** `/api/v1/integrity/:id`
- **Method:** `GET`
- **URL Params:**
    - `id=[string]` (required) - The UUID of the archived email to check.
- **Permissions:** `read:archive`
- **Success Response:**
    - **Code:** 200 OK
    - **Content:** `IntegrityCheckResult[]`

### Response Body `IntegrityCheckResult`

An array of objects, each representing the result of an integrity check for a single file (either the email itself or an attachment).

| Field      | Type                      | Description                                                                  |
| :--------- | :------------------------ | :--------------------------------------------------------------------------- |
| `type`     | `'email' \| 'attachment'` | The type of the file being checked.                                          |
| `id`       | `string`                  | The UUID of the email or attachment.                                         |
| `filename` | `string` (optional)       | The filename of the attachment. This field is only present for attachments.  |
| `isValid`  | `boolean`                 | `true` if the current hash matches the stored hash, otherwise `false`.       |
| `reason`   | `string` (optional)       | A reason for the failure. Only present if `isValid` is `false`.              |

### Example Response

```json
[
    {
        "type": "email",
        "id": "a1b2c3d4-e5f6-7890-1234-567890abcdef",
        "isValid": true
    },
    {
        "type": "attachment",
        "id": "b2c3d4e5-f6a7-8901-2345-67890abcdef1",
        "filename": "document.pdf",
        "isValid": false,
        "reason": "Stored hash does not match current hash."
    }
]
```

- **Error Response:**
    - **Code:** 404 Not Found
    - **Content:** `{ "message": "Archived email not found" }`
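A client-side sketch of calling this endpoint and surfacing failed checks; the host and `X-API-KEY` header are assumptions carried over from the authentication docs, while the route and response shape follow the table above:

```typescript
// Sketch: verifying an archived email's integrity and flagging failures.
type IntegrityCheckResult = {
    type: 'email' | 'attachment';
    id: string;
    filename?: string;
    isValid: boolean;
    reason?: string;
};

async function checkIntegrity(emailId: string): Promise<IntegrityCheckResult[]> {
    const res = await fetch(`http://localhost:4000/api/v1/integrity/${emailId}`, {
        headers: { 'X-API-KEY': process.env.OPEN_ARCHIVER_API_KEY ?? '' },
    });
    if (!res.ok) throw new Error(`Integrity check failed: HTTP ${res.status}`);
    const results = (await res.json()) as IntegrityCheckResult[];
    for (const r of results.filter((r) => !r.isValid)) {
        console.warn(`${r.type} ${r.id} (${r.filename ?? 'email body'}): ${r.reason}`);
    }
    return results;
}
```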
docs/api/jobs.md (new file, 128 lines)

# Jobs API

The Jobs API provides endpoints for monitoring the job queues and the jobs within them.

## Overview

Open Archiver uses a job queue system to handle asynchronous tasks like email ingestion and indexing. The system is built on Redis and BullMQ and uses a producer-consumer pattern.

### Job Statuses

Jobs can have one of the following statuses:

- **active:** The job is currently being processed.
- **completed:** The job has been completed successfully.
- **failed:** The job has failed after all retry attempts.
- **delayed:** The job is delayed and will be processed at a later time.
- **waiting:** The job is waiting to be processed.
- **paused:** The job is paused and will not be processed until it is resumed.

### Errors

When a job fails, the `failedReason` and `stacktrace` fields will contain information about the error. The `error` field will also be populated with the `failedReason` for easier access.

### Job Preservation

Jobs are preserved only for a limited time after they complete or fail, so the job counts and job listings returned by the API cover only that retention window.

- **Completed jobs:** The last 1000 completed jobs are preserved.
- **Failed jobs:** The last 5000 failed jobs are preserved.

## Get All Queues

- **Endpoint:** `GET /v1/jobs/queues`
- **Description:** Retrieves a list of all job queues and their job counts.
- **Permissions:** `manage:all`
- **Responses:**
    - `200 OK`: Returns a list of queue overviews.
    - `401 Unauthorized`: If the user is not authenticated.
    - `403 Forbidden`: If the user does not have the required permissions.

### Response Body

```json
{
    "queues": [
        {
            "name": "ingestion",
            "counts": {
                "active": 0,
                "completed": 56,
                "failed": 4,
                "delayed": 3,
                "waiting": 0,
                "paused": 0
            }
        },
        {
            "name": "indexing",
            "counts": {
                "active": 0,
                "completed": 0,
                "failed": 0,
                "delayed": 0,
                "waiting": 0,
                "paused": 0
            }
        }
    ]
}
```

## Get Queue Jobs

- **Endpoint:** `GET /v1/jobs/queues/:queueName`
- **Description:** Retrieves a list of jobs within a specific queue, with pagination and filtering by status.
- **Permissions:** `manage:all`
- **URL Parameters:**
    - `queueName` (string, required): The name of the queue to retrieve jobs from.
- **Query Parameters:**
    - `status` (string, optional): The status of the jobs to retrieve. Can be one of `active`, `completed`, `failed`, `delayed`, `waiting`, `paused`. Defaults to `failed`.
    - `page` (number, optional): The page number to retrieve. Defaults to `1`.
    - `limit` (number, optional): The number of jobs to retrieve per page. Defaults to `10`.
- **Responses:**
    - `200 OK`: Returns a detailed view of the queue, including a paginated list of jobs.
    - `401 Unauthorized`: If the user is not authenticated.
    - `403 Forbidden`: If the user does not have the required permissions.
    - `404 Not Found`: If the specified queue does not exist.

### Response Body

```json
{
    "name": "ingestion",
    "counts": {
        "active": 0,
        "completed": 56,
        "failed": 4,
        "delayed": 3,
        "waiting": 0,
        "paused": 0
    },
    "jobs": [
        {
            "id": "1",
            "name": "initial-import",
            "data": {
                "ingestionSourceId": "clx1y2z3a0000b4d2e5f6g7h8"
            },
            "state": "failed",
            "failedReason": "Error: Connection timed out",
            "timestamp": 1678886400000,
            "processedOn": 1678886401000,
            "finishedOn": 1678886402000,
            "attemptsMade": 5,
            "stacktrace": ["..."],
            "returnValue": null,
            "ingestionSourceId": "clx1y2z3a0000b4d2e5f6g7h8",
            "error": "Error: Connection timed out"
        }
    ],
    "pagination": {
        "currentPage": 1,
        "totalPages": 1,
        "totalJobs": 4,
        "limit": 10
    }
}
```
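A small monitoring sketch against the queue-jobs endpoint above; the host, the `/api` prefix, and the auth header are assumptions carried over from the other API docs:

```typescript
// Sketch: listing the most recent failed jobs in the ingestion queue.
const res = await fetch(
    'http://localhost:4000/api/v1/jobs/queues/ingestion?status=failed&page=1&limit=10',
    { headers: { 'X-API-KEY': process.env.OPEN_ARCHIVER_API_KEY ?? '' } }
);
if (!res.ok) throw new Error(`Queue lookup failed: HTTP ${res.status}`);
const queue = await res.json();
for (const job of queue.jobs) {
    console.log(`${job.id} ${job.name}: ${job.error} (attempts: ${job.attemptsMade})`);
}
```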
docs/api/rate-limiting.md (new file, 51 lines)

# Rate Limiting

The API implements rate limiting as a security measure to protect your instance from denial-of-service (DoS) and brute-force attacks. This is a crucial feature for maintaining the security and stability of the application.

## How It Works

The rate limiter restricts the number of requests an IP address can make within a specific time frame. These limits are configurable via environment variables to suit your security needs.

By default, the limits are:

- **100 requests** per **1 minute** per IP address.

If this limit is exceeded, the API will respond with an HTTP `429 Too Many Requests` status code.

### Response Body

When an IP address is rate-limited, the API will return a JSON response with the following format:

```json
{
    "status": 429,
    "message": "Too many requests from this IP, please try again after 15 minutes"
}
```

## Configuration

You can customize the rate-limiting settings by setting the following environment variables in your `.env` file:

- `RATE_LIMIT_WINDOW_MS`: The time window in milliseconds. Defaults to `60000` (1 minute).
- `RATE_LIMIT_MAX_REQUESTS`: The maximum number of requests allowed per IP address within the time window. Defaults to `100`.

## Handling Rate Limits

If you are developing a client that interacts with the API, you should handle rate limiting gracefully:

1. **Check the Status Code**: Monitor for a `429` HTTP status code in responses.
2. **Implement a Retry Mechanism**: When you receive a `429` response, it is best practice to wait before retrying the request. Implementing an exponential backoff strategy is recommended.
3. **Check Headers**: The response will include the following standard headers to help you manage your request rate:
    - `RateLimit-Limit`: The maximum number of requests allowed in the current window.
    - `RateLimit-Remaining`: The number of requests you have left in the current window.
    - `RateLimit-Reset`: The time when the rate limit window will reset, in UTC epoch seconds.

## Excluded Endpoints

Certain essential endpoints are excluded from rate limiting to ensure the application's UI remains responsive. These are:

- `/auth/status`
- `/settings/system`

These endpoints can be called as needed without affecting your rate limit count.
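A sketch of the recommended retry behavior, combining the `429` check, the `RateLimit-Reset` header (UTC epoch seconds), and exponential backoff as a fallback:

```typescript
// Sketch: retrying on 429 using the standard RateLimit-* headers.
async function fetchWithBackoff(
    url: string,
    init: RequestInit = {},
    maxRetries = 5
): Promise<Response> {
    for (let attempt = 0; ; attempt++) {
        const res = await fetch(url, init);
        if (res.status !== 429 || attempt >= maxRetries) return res;
        // Prefer the server's reset time when provided; otherwise back off exponentially.
        const reset = Number(res.headers.get('RateLimit-Reset'));
        const waitMs =
            Number.isFinite(reset) && reset > 0
                ? Math.max(0, reset * 1000 - Date.now())
                : 2 ** attempt * 1000;
        await new Promise((resolve) => setTimeout(resolve, waitMs));
    }
}
```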
docs/enterprise/audit-log/api.md (new file, 78 lines)

# Audit Log: API Endpoints

The audit log feature exposes two API endpoints for retrieving and verifying audit log data. Both endpoints require authentication and are only accessible to users with the appropriate permissions.

## Get Audit Logs

Retrieves a paginated list of audit log entries, with support for filtering and sorting.

- **Endpoint:** `GET /api/v1/enterprise/audit-logs`
- **Method:** `GET`
- **Authentication:** Required

### Query Parameters

| Parameter    | Type     | Description                                                                 |
| ------------ | -------- | --------------------------------------------------------------------------- |
| `page`       | `number` | The page number to retrieve. Defaults to `1`.                                |
| `limit`      | `number` | The number of entries to retrieve per page. Defaults to `20`.                |
| `startDate`  | `date`   | The start date for the date range filter.                                    |
| `endDate`    | `date`   | The end date for the date range filter.                                      |
| `actor`      | `string` | The actor identifier to filter by.                                           |
| `actionType` | `string` | The action type to filter by (e.g., `LOGIN`, `CREATE`).                      |
| `sort`       | `string` | The sort order for the results. Can be `asc` or `desc`. Defaults to `desc`.  |

### Response Body

```json
{
    "data": [
        {
            "id": 1,
            "previousHash": null,
            "timestamp": "2025-10-03T00:00:00.000Z",
            "actorIdentifier": "e8026a75-b58a-4902-8858-eb8780215f82",
            "actorIp": "::1",
            "actionType": "LOGIN",
            "targetType": "User",
            "targetId": "e8026a75-b58a-4902-8858-eb8780215f82",
            "details": {},
            "currentHash": "..."
        }
    ],
    "meta": {
        "total": 100,
        "page": 1,
        "limit": 20
    }
}
```

## Verify Audit Log Integrity

Initiates a verification process to check the integrity of the entire audit log chain.

- **Endpoint:** `POST /api/v1/enterprise/audit-logs/verify`
- **Method:** `POST`
- **Authentication:** Required

### Response Body

**Success**

```json
{
    "ok": true,
    "message": "Audit log integrity verified successfully."
}
```

**Failure**

```json
{
    "ok": false,
    "message": "Audit log chain is broken!",
    "logId": 123
}
```
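A sketch of querying the endpoint with filters; the host and auth header are assumptions from the API docs, and ISO 8601 strings are assumed for the two date parameters:

```typescript
// Sketch: fetching the last 24 hours of LOGIN events, newest first.
const params = new URLSearchParams({
    page: '1',
    limit: '20',
    actionType: 'LOGIN',
    startDate: new Date(Date.now() - 24 * 60 * 60 * 1000).toISOString(),
    endDate: new Date().toISOString(),
    sort: 'desc',
});
const res = await fetch(
    `http://localhost:4000/api/v1/enterprise/audit-logs?${params}`,
    { headers: { 'X-API-KEY': process.env.OPEN_ARCHIVER_API_KEY ?? '' } }
);
const { data, meta } = await res.json();
console.log(`Showing ${data.length} of ${meta.total} entries`);
```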
docs/enterprise/audit-log/audit-service.md (new file, 31 lines)

# Audit Log: Backend Implementation

The backend implementation of the audit log is handled by the `AuditService`, located in `packages/backend/src/services/AuditService.ts`. This service encapsulates all the logic for creating, retrieving, and verifying audit log entries.

## Hashing and Verification Logic

The core of the audit log's immutability lies in its hashing and verification logic.

### Hash Calculation

The `calculateHash` method is responsible for generating a SHA-256 hash of a log entry. To ensure consistency, it performs the following steps:

1. **Canonical Object Creation:** It constructs a new object with a fixed property order, ensuring that the object's structure is always the same.
2. **Timestamp Normalization:** It converts the `timestamp` to milliseconds since the epoch (`getTime()`) to avoid any precision-related discrepancies between the application and the database.
3. **Canonical Stringification:** It uses a custom `canonicalStringify` function to create a JSON string representation of the object. This function sorts the object keys, ensuring that the output is always the same, regardless of the in-memory property order.
4. **Hash Generation:** It computes a SHA-256 hash of the canonical string.

### Verification Process

The `verifyAuditLog` method is designed to be highly scalable and efficient, even with millions of log entries. It processes the logs in manageable chunks (e.g., 1000 at a time) to avoid loading the entire table into memory.

The verification process involves the following steps:

1. **Iterative Processing:** It fetches the logs in batches within a `while` loop.
2. **Chain Verification:** For each log entry, it compares the `previousHash` with the `currentHash` of the preceding log. If they do not match, the chain is broken, and the verification fails.
3. **Hash Recalculation:** It recalculates the hash of the current log entry using the same `calculateHash` method used during creation.
4. **Integrity Check:** It compares the recalculated hash with the `currentHash` stored in the database. If they do not match, the log entry has been tampered with, and the verification fails.

## Service Integration

The `AuditService` is integrated into the application through the `AuditLogModule` (`packages/enterprise/src/modules/audit-log/audit-log.module.ts`), which registers the API routes for the audit log feature. The service's `createAuditLog` method is called from various other services throughout the application to record significant events.
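To make the scheme concrete, an illustrative sketch of the hash-chain idea described above — not the actual `AuditService` code, and covering only a subset of the entry's fields:

```typescript
// Illustrative sketch: canonical stringification + SHA-256 chaining.
import { createHash } from 'node:crypto';

// Sort keys recursively so the same object always yields the same string.
function canonicalStringify(value: unknown): string {
    if (value === null || typeof value !== 'object') return JSON.stringify(value);
    if (Array.isArray(value)) return `[${value.map(canonicalStringify).join(',')}]`;
    const obj = value as Record<string, unknown>;
    const entries = Object.keys(obj)
        .sort()
        .map((k) => `${JSON.stringify(k)}:${canonicalStringify(obj[k])}`);
    return `{${entries.join(',')}}`;
}

function calculateHash(entry: {
    previousHash: string | null; // links this entry to the preceding one
    timestamp: Date;
    actorIdentifier: string;
    actionType: string;
    details: unknown;
}): string {
    const canonical = canonicalStringify({
        previousHash: entry.previousHash,
        timestamp: entry.timestamp.getTime(), // normalize to epoch milliseconds
        actorIdentifier: entry.actorIdentifier,
        actionType: entry.actionType,
        details: entry.details,
    });
    return createHash('sha256').update(canonical).digest('hex');
}
```

Because each entry's hash covers the previous entry's hash, altering or deleting any entry changes every hash after it, which is exactly what the verification pass detects.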
docs/enterprise/audit-log/guide.md (new file, 39 lines)

# Audit Log: User Interface

The audit log user interface provides a comprehensive view of all significant events that have occurred within the Open Archiver system. It is designed to be intuitive and user-friendly, allowing administrators to easily monitor and review system activity.

## Viewing Audit Logs

The main audit log page displays a table of log entries, with the following columns:

- **Timestamp:** The date and time of the event.
- **Actor:** The identifier of the user or system process that performed the action.
- **IP Address:** The IP address from which the action was initiated.
- **Action:** The type of action performed, displayed as a color-coded badge for easy identification.
- **Target Type:** The type of resource that was affected.
- **Target ID:** The unique identifier of the affected resource.
- **Details:** A truncated preview of the event's details. The full JSON object is displayed in a pop-up card on hover.

## Filtering and Sorting

The table can be sorted by timestamp by clicking the "Timestamp" header. This allows you to view the logs in either chronological or reverse chronological order.

## Pagination

Pagination controls are available below the table, allowing you to navigate through the entire history of audit log entries.

## Verifying Log Integrity

The "Verify Log Integrity" button allows you to initiate a verification process to check the integrity of the entire audit log chain. This process recalculates the hash of each log entry and compares it to the stored hash, ensuring that the cryptographic chain is unbroken and no entries have been tampered with.

### Verification Responses

- **Success:** A success notification is displayed, confirming that the audit log integrity has been verified successfully. This means that the log chain is complete and no entries have been tampered with.

- **Failure:** An error notification is displayed, indicating that the audit log chain is broken or an entry has been tampered with. The notification will include the ID of the log entry where the issue was detected. There are two types of failures:
    - **Audit log chain is broken:** This means that the `previousHash` of a log entry does not match the `currentHash` of the preceding entry. This indicates that one or more log entries may have been deleted or inserted into the chain.
    - **Audit log entry is tampered!:** This means that the recalculated hash of a log entry does not match its stored `currentHash`. This indicates that the data within the log entry has been altered.

## Viewing Log Details

You can view the full details of any log entry by clicking on its row in the table. This will open a dialog containing all the information associated with the log entry, including the previous and current hashes.
27
docs/enterprise/audit-log/index.md
Normal file
27
docs/enterprise/audit-log/index.md
Normal file
@@ -0,0 +1,27 @@
# Audit Log

The Audit Log is an enterprise-grade feature designed to provide a complete, immutable, and verifiable record of every significant action that occurs within the Open Archiver system. Its primary purpose is to ensure compliance with strict regulatory standards, such as the German GoBD, by establishing a tamper-proof chain of evidence for all activities.

## Core Principles

To fulfill its compliance and security functions, the audit log adheres to the following core principles:

### 1. Immutability

Every log entry is cryptographically chained to the previous one. Each new entry contains a SHA-256 hash of the preceding entry's hash, creating a verifiable chain. Any attempt to alter or delete a past entry would break this chain and be immediately detectable through the verification process.
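As an illustration, here is a minimal TypeScript sketch of such a chaining scheme. The `previousHash` and `currentHash` field names match the audit log UI documentation above; the exact payload that gets hashed is an assumption for illustration only.

```typescript
import { createHash } from 'node:crypto';

interface AuditLogEntry {
    actor: string;
    action: string;
    details: unknown;
    previousHash: string; // the currentHash of the preceding entry
    currentHash: string;
}

// Hypothetical helper: derive an entry's hash from its own content plus
// the preceding entry's hash, so each entry is chained to its predecessor.
function computeEntryHash(entry: Omit<AuditLogEntry, 'currentHash'>): string {
    const payload = JSON.stringify({
        actor: entry.actor,
        action: entry.action,
        details: entry.details,
        previousHash: entry.previousHash,
    });
    return createHash('sha256').update(payload).digest('hex');
}
```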
### 2. Completeness

The system is designed to log every significant event without exception. This includes not only user-initiated actions (like logins, searches, and downloads) but also automated system processes, such as data ingestion and policy-based deletions.

### 3. Attribution

Each log entry is unambiguously linked to the actor that initiated the event. This could be a specific authenticated user, an external auditor, or an automated system process. The actor's identifier and source IP address are recorded to ensure full traceability.

### 4. Clarity and Detail

Log entries are structured to be detailed and human-readable, providing sufficient context for an auditor to understand the event without needing specialized system knowledge. This includes the action performed, the target resource affected, and a JSON object with specific, contextual details of the event.

### 5. Verifiability

The integrity of the entire audit log can be verified at any time. A dedicated process iterates through the logs from the beginning, recalculating the hash of each entry and comparing it to the stored hash, ensuring the cryptographic chain is unbroken and no entries have been tampered with.
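To make this concrete, here is a sketch of that verification walk, reusing the hypothetical `computeEntryHash` helper from the Immutability section above:

```typescript
// Walk the chain from the first entry, recomputing every hash. The array
// index stands in for the entry ID reported in failure notifications.
function verifyChain(entries: AuditLogEntry[]): { ok: boolean; failedAt?: number } {
    let previousHash = ''; // assumed sentinel value for the very first entry
    for (const [index, entry] of entries.entries()) {
        if (entry.previousHash !== previousHash) {
            return { ok: false, failedAt: index }; // chain is broken
        }
        if (computeEntryHash(entry) !== entry.currentHash) {
            return { ok: false, failedAt: index }; // entry was tampered with
        }
        previousHash = entry.currentHash;
    }
    return { ok: true };
}
```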
@@ -1,4 +1,4 @@
# IAM Policies
# IAM Policy

This document provides a guide to creating and managing IAM policies in Open Archiver. It is intended for developers and administrators who need to configure granular access control for users and roles.
96
docs/services/ocr-service.md
Normal file
@@ -0,0 +1,96 @@
# OCR Service

The OCR (Optical Character Recognition) and text extraction service is responsible for extracting plain text content from various file formats, such as PDFs, Office documents, and more. This is a crucial component for making email attachments searchable.

## Overview

The system employs a two-pronged approach for text extraction:

1. **Primary Extractor (Apache Tika)**: A powerful and versatile toolkit that can extract text from a wide variety of file formats. It is the recommended method for its superior performance and format support.
2. **Legacy Extractor**: A fallback mechanism that uses a combination of libraries (`pdf2json`, `mammoth`, `xlsx`) for common file types like PDF, DOCX, and XLSX. This is used when Apache Tika is not configured.

The main logic resides in `packages/backend/src/helpers/textExtractor.ts`, which decides which extraction method to use based on the application's configuration.

## Configuration

To enable the primary text extraction method, you must configure the URL of an Apache Tika server instance in your environment variables.

In your `.env` file, set the `TIKA_URL`:

```env
# .env.example

# Apache Tika Integration
# ONLY active if TIKA_URL is set
TIKA_URL=http://tika:9998
```

If `TIKA_URL` is not set, the system will automatically fall back to the legacy extraction methods. The service performs a health check on startup to verify connectivity with the Tika server.
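Conceptually, the dispatch amounts to the following sketch (Node 18+ for the global `fetch`; the Tika call uses Tika's standard `PUT /tika` endpoint, and `extractTextLegacy` is stubbed here rather than reproducing the real parser code):

```typescript
async function extractWithTika(buffer: Buffer, mimeType: string, tikaUrl: string): Promise<string> {
    // Tika returns extracted text when a file is PUT to /tika with Accept: text/plain.
    const res = await fetch(`${tikaUrl}/tika`, {
        method: 'PUT',
        headers: { 'Content-Type': mimeType, Accept: 'text/plain' },
        body: buffer,
    });
    if (!res.ok) throw new Error(`Tika returned ${res.status}`);
    return res.text();
}

async function extractTextLegacy(buffer: Buffer, mimeType: string): Promise<string> {
    // Placeholder for the pdf2json / mammoth / xlsx code paths.
    if (mimeType.startsWith('text/')) return buffer.toString('utf-8');
    throw new Error(`Unsupported without Tika: ${mimeType}`);
}

export async function extractText(buffer: Buffer, mimeType: string): Promise<string> {
    const tikaUrl = process.env.TIKA_URL;
    return tikaUrl ? extractWithTika(buffer, mimeType, tikaUrl) : extractTextLegacy(buffer, mimeType);
}
```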
## File Size Limits

To prevent excessive memory usage and processing time, the service imposes a general size limit on files submitted for text extraction. Files larger than the configured limit will be skipped.

- **With Apache Tika**: The maximum file size is **100MB**.
- **With Legacy Fallback**: The maximum file size is **50MB**.

## Supported File Formats

The service's ability to extract text depends on whether it's using Apache Tika or the legacy fallback methods.

### With Apache Tika

When `TIKA_URL` is configured, the service can process a vast range of file formats. Apache Tika is designed for broad compatibility and supports hundreds of file types, including but not limited to:

- Portable Document Format (PDF)
- Microsoft Office formats (DOC, DOCX, PPT, PPTX, XLS, XLSX)
- OpenDocument formats (ODT, ODS, ODP)
- Rich Text Format (RTF)
- Plain text (TXT, CSV, JSON, XML, HTML)
- Image formats with OCR capabilities (PNG, JPEG, TIFF)
- Archive formats (ZIP, TAR, GZ)
- Email formats (EML, MSG)

For a complete and up-to-date list, please refer to the official [Apache Tika documentation](https://tika.apache.org/3.2.3/formats.html).

### With Legacy Fallback

When Tika is not configured, text extraction is limited to the following formats:

- `application/pdf` (PDF)
- `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX)
- `application/vnd.openxmlformats-officedocument.spreadsheetml.sheet` (XLSX)
- Plain text formats such as `text/*`, `application/json`, and `application/xml`.

## Features of the Tika Integration (`OcrService`)

The `OcrService` (`packages/backend/src/services/OcrService.ts`) provides several enhancements to make text extraction efficient and robust.

### Caching

To avoid redundant processing of the same file, the service implements a simple LRU (Least Recently Used) cache.

- **Cache Key**: A SHA-256 hash of the file's buffer is used as the cache key.
- **Functionality**: If a file with the same hash is processed again, the text content is served directly from the cache, saving significant processing time.
- **Statistics**: The service keeps track of cache hits, misses, and the hit rate for performance monitoring.
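A minimal sketch of such a hash-keyed LRU cache (the capacity and method names are assumptions, not the actual `OcrService` internals):

```typescript
import { createHash } from 'node:crypto';

// A Map preserves insertion order, which is enough for a simple LRU.
class TextCache {
    private cache = new Map<string, string>();
    constructor(private maxEntries = 100) {}

    private keyFor(buffer: Buffer): string {
        return createHash('sha256').update(buffer).digest('hex');
    }

    get(buffer: Buffer): string | undefined {
        const key = this.keyFor(buffer);
        const value = this.cache.get(key);
        if (value !== undefined) {
            // Refresh recency: re-insert so the entry moves to the end.
            this.cache.delete(key);
            this.cache.set(key, value);
        }
        return value;
    }

    set(buffer: Buffer, text: string): void {
        const key = this.keyFor(buffer);
        this.cache.delete(key);
        this.cache.set(key, text);
        if (this.cache.size > this.maxEntries) {
            // Evict the least recently used entry (the first key in the Map).
            const oldest = this.cache.keys().next().value;
            if (oldest !== undefined) this.cache.delete(oldest);
        }
    }
}
```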
### Concurrency Management (Semaphore)

Extracting text from large files can be resource-intensive. To prevent the Tika server from being overwhelmed by multiple requests for the _same file_ simultaneously (e.g., during a large import), a semaphore mechanism is used.

- **Functionality**: If a request for a specific file (identified by its hash) is already in progress, any subsequent requests for the same file will wait for the first one to complete and then use its result.
- **Benefit**: This deduplicates parallel processing efforts and reduces unnecessary load on the Tika server.
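In practice, this kind of deduplication can be as simple as sharing one in-flight promise per file hash; a sketch under that assumption:

```typescript
// Map of file hash -> in-flight extraction promise.
const inFlight = new Map<string, Promise<string>>();

async function extractOnce(hash: string, run: () => Promise<string>): Promise<string> {
    const pending = inFlight.get(hash);
    if (pending) return pending; // reuse the request already in progress

    const task = run().finally(() => inFlight.delete(hash));
    inFlight.set(hash, task);
    return task;
}
```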
### Health Check and DNS Fallback

- **Availability Check**: The service includes a `checkTikaAvailability` method to verify that the Tika server is reachable and operational. This check is performed on application startup.
- **DNS Fallback**: For convenience in Docker environments, if the Tika URL uses the hostname `tika` (e.g., `http://tika:9998`), the service will automatically attempt a fallback to `localhost` if the initial connection fails.

## Legacy Fallback Methods

When Tika is not available, the `extractTextLegacy` function in `textExtractor.ts` handles extraction for a limited set of MIME types:

- `application/pdf`: Processed using `pdf2json`. Includes a 50MB size limit and a 5-second timeout to prevent memory issues.
- `application/vnd.openxmlformats-officedocument.wordprocessingml.document` (DOCX): Processed using `mammoth`.
- `application/vnd.openxmlformats-officedocument.spreadsheetml.sheet` (XLSX): Processed using `xlsx`.
- Plain text formats (`text/*`, `application/json`, `application/xml`): Converted directly from the buffer.
@@ -30,7 +30,14 @@ archive.zip
2. Click the **Create New** button.
3. Select **EML Import** as the provider.
4. Enter a name for the ingestion source.
5. Click the **Choose File** button and select the zip archive containing your EML files.
5. **Choose Import Method:**
    * **Upload File:** Click **Choose File** and select the zip archive containing your EML files. (Best for smaller archives)
    * **Local Path:** Enter the path to the zip file **inside the container**. (Best for large archives)

> **Note on Local Path:** When using Docker, the "Local Path" is relative to the container's filesystem.
> * **Recommended:** Place your zip file in a `temp` folder inside your configured storage directory (`STORAGE_LOCAL_ROOT_PATH`). This path is already mounted. For example, if your storage path is `/data`, put the file in `/data/temp/emails.zip` and enter `/data/temp/emails.zip` as the path.
> * **Alternative:** Mount a separate volume in `docker-compose.yml` (e.g., `- /host/path:/container/path`) and use the container path.

6. Click the **Submit** button.

OpenArchiver will then start importing the EML files from the zip archive. The ingestion process may take some time, depending on the size of the archive.
@@ -9,3 +9,4 @@ Choose your provider from the list below to get started:
- [Generic IMAP Server](./imap.md)
- [EML Import](./eml.md)
- [PST Import](./pst.md)
- [Mbox Import](./mbox.md)
35
docs/user-guides/email-providers/mbox.md
Normal file
@@ -0,0 +1,35 @@
# Mbox Ingestion

Mbox is a common format for storing email messages. This guide will walk you through the process of ingesting mbox files into OpenArchiver.

## 1. Exporting from Your Email Client

Most email clients that support mbox exports will allow you to export a folder of emails as a single `.mbox` file. Here are the general steps:

- **Mozilla Thunderbird**: Right-click on a folder, select **ImportExportTools NG**, and then choose **Export folder**.
- **Gmail**: You can use Google Takeout to export your emails in mbox format.
- **Other Clients**: Refer to your email client's documentation for instructions on how to export emails to an mbox file.

## 2. Uploading to OpenArchiver

Once you have your `.mbox` file, you can upload it to OpenArchiver through the web interface.

1. Navigate to the **Ingestion** page.
2. Click on the **New Ingestion** button.
3. Select **Mbox** as the source type.
4. **Choose Import Method:**
    * **Upload File:** Upload your `.mbox` file.
    * **Local Path:** Enter the path to the mbox file **inside the container**.

> **Note on Local Path:** When using Docker, the "Local Path" is relative to the container's filesystem.
> * **Recommended:** Place your mbox file in a `temp` folder inside your configured storage directory (`STORAGE_LOCAL_ROOT_PATH`). This path is already mounted. For example, if your storage path is `/data`, put the file in `/data/temp/emails.mbox` and enter `/data/temp/emails.mbox` as the path.
> * **Alternative:** Mount a separate volume in `docker-compose.yml` (e.g., `- /host/path:/container/path`) and use the container path.

## 3. Folder Structure

OpenArchiver will attempt to preserve the original folder structure of your emails. This is done by inspecting the following email headers:

- `X-Gmail-Labels`: Used by Gmail to store labels.
- `X-Folder`: A custom header used by some email clients like Thunderbird.

If neither of these headers is present, the emails will be ingested into the root of the archive.
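For illustration, a minimal sketch of that header lookup using `mailparser` (which the backend already depends on); taking only the first Gmail label is a simplification:

```typescript
import { simpleParser } from 'mailparser';

// Derive a folder path for one raw mbox message, preferring Gmail labels,
// then Thunderbird's X-Folder header, and finally the archive root.
async function resolveFolder(rawMessage: Buffer): Promise<string> {
    const parsed = await simpleParser(rawMessage);
    const labels = parsed.headers.get('x-gmail-labels');
    if (typeof labels === 'string' && labels.length > 0) {
        return labels.split(',')[0].trim();
    }
    const folder = parsed.headers.get('x-folder');
    if (typeof folder === 'string' && folder.length > 0) {
        return folder;
    }
    return '/'; // root of the archive
}
```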
@@ -15,7 +15,14 @@ To ensure a successful import, you should prepare your PST file according to the
2. Click the **Create New** button.
3. Select **PST Import** as the provider.
4. Enter a name for the ingestion source.
5. Click the **Choose File** button and select the PST file.
5. **Choose Import Method:**
    * **Upload File:** Click **Choose File** and select the PST file from your computer. (Best for smaller files)
    * **Local Path:** Enter the path to the PST file **inside the container**. (Best for large files)

> **Note on Local Path:** When using Docker, the "Local Path" is relative to the container's filesystem.
> * **Recommended:** Place your file in a `temp` folder inside your configured storage directory (`STORAGE_LOCAL_ROOT_PATH`). This path is already mounted. For example, if your storage path is `/data`, put the file in `/data/temp/archive.pst` and enter `/data/temp/archive.pst` as the path.
> * **Alternative:** Mount a separate volume in `docker-compose.yml` (e.g., `- /host/path:/container/path`) and use the container path.

6. Click the **Submit** button.

OpenArchiver will then start importing the emails from the PST file. The ingestion process may take some time, depending on the size of the file.
@@ -17,7 +17,22 @@ git clone https://github.com/LogicLabs-OU/OpenArchiver.git
cd OpenArchiver
```

## 2. Configure Your Environment
## 2. Create a Directory for Local Storage (Important)

Before configuring the application, you **must** create a directory on your host machine where Open Archiver will store its data (such as emails and attachments). Manually creating this directory helps prevent potential permission issues.

For example, you can use the path `/var/data/open-archiver`.

Run the following commands to create the directory and set the correct permissions:

```bash
sudo mkdir -p /var/data/open-archiver
sudo chown -R $(id -u):$(id -g) /var/data/open-archiver
```

This ensures the directory is owned by your current user, which is necessary for the application to have write access. You will set this path in your `.env` file in the next step.

## 3. Configure Your Environment

The application is configured using environment variables. You'll need to create a `.env` file to store your configuration.
@@ -29,9 +44,15 @@ cp .env.example.docker .env
Now, open the `.env` file in a text editor and customize the settings.

### Important Configuration
### Key Configuration Steps

You must change the following placeholder values to secure your instance:
1. **Set the Storage Path**: Find the `STORAGE_LOCAL_ROOT_PATH` variable and set it to the path you just created.

    ```env
    STORAGE_LOCAL_ROOT_PATH=/var/data/open-archiver
    ```

2. **Secure Your Instance**: You must change the following placeholder values to secure your instance:

    - `POSTGRES_PASSWORD`: A strong, unique password for the database.
    - `REDIS_PASSWORD`: A strong, unique password for the Valkey/Redis service.
@@ -41,6 +62,10 @@ You must change the following placeholder values to secure your instance:
```bash
openssl rand -hex 32
```

- `STORAGE_ENCRYPTION_KEY`: **(Optional but Recommended)** A 32-byte hex string for encrypting emails and attachments at rest. If this key is not provided, storage encryption will be disabled. You can generate one with:

    ```bash
    openssl rand -hex 32
    ```

### Storage Configuration
@@ -65,29 +90,34 @@ Here is a complete list of environment variables available for configuration:
#### Application Settings

| Variable         | Description                                                                                            | Default Value |
| ---------------- | ------------------------------------------------------------------------------------------------------ | ------------- |
| `NODE_ENV`       | The application environment.                                                                           | `development` |
| `PORT_BACKEND`   | The port for the backend service.                                                                      | `4000`        |
| `PORT_FRONTEND`  | The port for the frontend service.                                                                     | `3000`        |
| `SYNC_FREQUENCY` | The frequency of continuous email syncing. See [cron syntax](https://crontab.guru/) for more details.  | `* * * * *`   |

| Variable                | Description                                                                                                                                                  | Default Value           |
| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------------- |
| `NODE_ENV`              | The application environment.                                                                                                                                 | `development`           |
| `PORT_BACKEND`          | The port for the backend service.                                                                                                                            | `4000`                  |
| `PORT_FRONTEND`         | The port for the frontend service.                                                                                                                           | `3000`                  |
| `APP_URL`               | The public-facing URL of your application. This is used by the backend to configure CORS.                                                                    | `http://localhost:3000` |
| `ORIGIN`                | Used by the SvelteKit Node adapter to determine the server's public-facing URL. It should always be set to the value of `APP_URL` (e.g., `ORIGIN=$APP_URL`). | `http://localhost:3000` |
| `SYNC_FREQUENCY`        | The frequency of continuous email syncing. See [cron syntax](https://crontab.guru/) for more details.                                                        | `* * * * *`             |
| `ALL_INCLUSIVE_ARCHIVE` | Set to `true` to include all emails, including Junk and Trash folders, in the email archive.                                                                 | `false`                 |
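For example, to run continuous syncing every five minutes instead of every minute, set the cron expression accordingly:

```env
SYNC_FREQUENCY='*/5 * * * *'
```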
#### Docker Compose Service Configuration

These variables are used by `docker-compose.yml` to configure the services.

| Variable            | Description                                     | Default Value                                             |
| ------------------- | ----------------------------------------------- | --------------------------------------------------------- |
| `POSTGRES_DB`       | The name of the PostgreSQL database.            | `open_archive`                                            |
| `POSTGRES_USER`     | The username for the PostgreSQL database.       | `admin`                                                   |
| `POSTGRES_PASSWORD` | The password for the PostgreSQL database.       | `password`                                                |
| `DATABASE_URL`      | The connection URL for the PostgreSQL database. | `postgresql://admin:password@postgres:5432/open_archive`  |
| `MEILI_MASTER_KEY`  | The master key for Meilisearch.                 | `aSampleMasterKey`                                        |
| `MEILI_HOST`        | The host for the Meilisearch service.           | `http://meilisearch:7700`                                 |
| `REDIS_HOST`        | The host for the Valkey (Redis) service.        | `valkey`                                                  |
| `REDIS_PORT`        | The port for the Valkey (Redis) service.        | `6379`                                                    |
| `REDIS_PASSWORD`    | The password for the Valkey (Redis) service.    | `defaultredispassword`                                    |
| `REDIS_TLS_ENABLED` | Enable or disable TLS for Redis.                | `false`                                                   |

| Variable               | Description                                          | Default Value                                             |
| ---------------------- | ---------------------------------------------------- | --------------------------------------------------------- |
| `POSTGRES_DB`          | The name of the PostgreSQL database.                 | `open_archive`                                            |
| `POSTGRES_USER`        | The username for the PostgreSQL database.            | `admin`                                                   |
| `POSTGRES_PASSWORD`    | The password for the PostgreSQL database.            | `password`                                                |
| `DATABASE_URL`         | The connection URL for the PostgreSQL database.      | `postgresql://admin:password@postgres:5432/open_archive`  |
| `MEILI_MASTER_KEY`     | The master key for Meilisearch.                      | `aSampleMasterKey`                                        |
| `MEILI_HOST`           | The host for the Meilisearch service.                | `http://meilisearch:7700`                                 |
| `MEILI_INDEXING_BATCH` | The number of emails to batch together for indexing. | `500`                                                     |
| `REDIS_HOST`           | The host for the Valkey (Redis) service.             | `valkey`                                                  |
| `REDIS_PORT`           | The port for the Valkey (Redis) service.             | `6379`                                                    |
| `REDIS_USER`           | Optional Redis username if ACLs are used.            |                                                           |
| `REDIS_PASSWORD`       | The password for the Valkey (Redis) service.         | `defaultredispassword`                                    |
| `REDIS_TLS_ENABLED`    | Enable or disable TLS for Redis.                     | `false`                                                   |

#### Storage Settings
@@ -95,24 +125,34 @@ These variables are used by `docker-compose.yml` to configure the services.
| ------------------------------ | ------------------------------------------------------------------------------------------------------------ | ------------------------- |
| `STORAGE_TYPE`                 | The storage backend to use (`local` or `s3`).                                                                 | `local`                   |
| `BODY_SIZE_LIMIT`              | The maximum request body size for uploads. Can be a number in bytes or a string with a unit (e.g., `100M`).   | `100M`                    |
| `STORAGE_LOCAL_ROOT_PATH`      | The root path for local file storage.                                                                         | `/var/data/open-archiver` |
| `STORAGE_LOCAL_ROOT_PATH`      | The root path for Open Archiver app data.                                                                     | `/var/data/open-archiver` |
| `STORAGE_S3_ENDPOINT`          | The endpoint for S3-compatible storage (required if `STORAGE_TYPE` is `s3`).                                  |                           |
| `STORAGE_S3_BUCKET`            | The bucket name for S3-compatible storage (required if `STORAGE_TYPE` is `s3`).                               |                           |
| `STORAGE_S3_ACCESS_KEY_ID`     | The access key ID for S3-compatible storage (required if `STORAGE_TYPE` is `s3`).                             |                           |
| `STORAGE_S3_SECRET_ACCESS_KEY` | The secret access key for S3-compatible storage (required if `STORAGE_TYPE` is `s3`).                         |                           |
| `STORAGE_S3_REGION`            | The region for S3-compatible storage (required if `STORAGE_TYPE` is `s3`).                                    |                           |
| `STORAGE_S3_FORCE_PATH_STYLE`  | Force path-style addressing for S3 (optional).                                                                | `false`                   |
| `STORAGE_ENCRYPTION_KEY`       | A 32-byte hex string for AES-256 encryption of files at rest. If not set, files will not be encrypted.        |                           |

#### Security & Authentication

| Variable         | Description                                                         | Default Value                              |
| ---------------- | ------------------------------------------------------------------- | ------------------------------------------ |
| `JWT_SECRET`     | A secret key for signing JWT tokens.                                | `a-very-secret-key-that-you-should-change` |
| `JWT_EXPIRES_IN` | The expiration time for JWT tokens.                                 | `7d`                                       |
| `SUPER_API_KEY`  | An API key with super admin privileges.                             |                                            |
| `ENCRYPTION_KEY` | A 32-byte hex string for encrypting sensitive data in the database. |                                            |

| Variable                         | Description                                                                                                                                                                          | Default Value                              |
| -------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------ |
| `ENABLE_DELETION`                | Enable or disable deletion of emails and ingestion sources. If this option is not set, or is set to any value other than `true`, deletion will be disabled for the entire instance. | `false`                                    |
| `JWT_SECRET`                     | A secret key for signing JWT tokens.                                                                                                                                                 | `a-very-secret-key-that-you-should-change` |
| `JWT_EXPIRES_IN`                 | The expiration time for JWT tokens.                                                                                                                                                  | `7d`                                       |
| ~~`SUPER_API_KEY`~~ (Deprecated) | An API key with super admin privileges. (`SUPER_API_KEY` has been deprecated since v0.3.0, following the roll-out of the role-based access control system.)                          |                                            |
| `RATE_LIMIT_WINDOW_MS`           | The window in milliseconds for which API requests are checked.                                                                                                                       | `900000` (15 minutes)                      |
| `RATE_LIMIT_MAX_REQUESTS`        | The maximum number of API requests allowed from an IP within the window.                                                                                                             | `100`                                      |
| `ENCRYPTION_KEY`                 | A 32-byte hex string for encrypting sensitive data in the database.                                                                                                                  |                                            |

## 3. Run the Application
#### Apache Tika Integration

| Variable   | Description                                                                                                                                                                           | Default Value      |
| ---------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------ |
| `TIKA_URL` | Optional. The URL of an Apache Tika server for advanced text extraction from attachments. If not set, the application falls back to built-in parsers for PDF, Word, and Excel files. | `http://tika:9998` |

## 4. Run the Application

Once you have configured your `.env` file, you can start all the services using Docker Compose:
@@ -132,13 +172,15 @@ You can check the status of the running containers with:
docker compose ps
```

## 4. Access the Application
## 5. Access the Application

Once the services are running, you can access the Open Archiver web interface by navigating to `http://localhost:3000` in your web browser.

You can log in with the `ADMIN_EMAIL` and `ADMIN_PASSWORD` you configured in your `.env` file.
Upon first visit, you will be redirected to the `/setup` page, where you can set up your admin account. Make sure you are the first person who accesses the instance.

## 5. Next Steps
If you are not redirected to the `/setup` page but instead see the login page, there might be something wrong with the database. Restart the service and try again.

## 6. Next Steps

After successfully deploying and logging into Open Archiver, the next step is to configure your ingestion sources to start archiving emails.
@@ -210,9 +252,9 @@ If you are using local storage to store your emails, based on your `docker-compo
Run this command to see all the volumes on your system:

```bash
docker volume ls
```

2. **Identify the correct volume**:
@@ -222,28 +264,28 @@ Look through the list for a volume name that ends with `_archiver-data`. The par
Once you've identified the correct volume name, use it in the `inspect` command. For example:

```bash
docker volume inspect <your_volume_name_here>
```

This will give you the correct `Mountpoint` path where your data is being stored. It will look something like this (the exact path will vary depending on your system):

```json
{
    "CreatedAt": "2025-07-25T11:22:19Z",
    "Driver": "local",
    "Labels": {
        "com.docker.compose.config-hash": "---",
        "com.docker.compose.project": "---",
        "com.docker.compose.version": "2.38.2",
        "com.docker.compose.volume": "us8wwos0o4ok4go4gc8cog84_archiver-data"
    },
    "Mountpoint": "/var/lib/docker/volumes/us8wwos0o4ok4go4gc8cog84_archiver-data/_data",
    "Name": "us8wwos0o4ok4go4gc8cog84_archiver-data",
    "Options": null,
    "Scope": "local"
}
```

In this example, the data is located at `/var/lib/docker/volumes/us8wwos0o4ok4go4gc8cog84_archiver-data/_data`. You can then `cd` into that directory to see your files.
@@ -257,43 +299,43 @@ Here’s how you can do it:
Open the `docker-compose.yml` file and find the `open-archiver` service. You're going to change the `volumes` section.

**Change this:**

```yaml
services:
    open-archiver:
        # ... other config
        volumes:
            - archiver-data:/var/data/open-archiver
```

**To this:**

```yaml
services:
    open-archiver:
        # ... other config
        volumes:
            - ./data/open-archiver:/var/data/open-archiver
```

You'll also want to remove the `archiver-data` volume definition at the bottom of the file, since it's no longer needed.

**Remove this whole block:**

```yaml
volumes:
    # ... other volumes
    archiver-data:
        driver: local
```

2. **Restart your containers**:

After you've saved the changes, run the following command in your terminal to apply them. The `--force-recreate` flag will ensure the container is recreated with the new volume settings.

```bash
docker-compose up -d --force-recreate
```

After this, any new data will be saved directly into the `./data/open-archiver` folder in your project directory.
37
docs/user-guides/integrity-check.md
Normal file
@@ -0,0 +1,37 @@
# Integrity Check

Open Archiver allows you to verify the integrity of your archived emails and their attachments. This guide explains how the integrity check works and what the results mean.

## How It Works

When an email is archived, Open Archiver calculates a unique cryptographic signature (a SHA-256 hash) for the email's raw `.eml` file and for each of its attachments. These signatures are stored in the database alongside the email's metadata.

The integrity check feature recalculates these signatures for the stored files and compares them to the original signatures stored in the database. This process allows you to verify that the content of your archived emails has not been altered, corrupted, or tampered with since the moment they were archived.
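As a sketch, the check for a single file boils down to recomputing the hash and comparing it with the stored value (the helper and field names here are illustrative, not the backend's actual API):

```typescript
import { createHash } from 'node:crypto';
import { readFile } from 'node:fs/promises';

// Recompute a stored file's SHA-256 hash and compare it to the
// signature recorded at archiving time.
async function checkIntegrity(path: string, storedHash: string): Promise<'valid' | 'invalid'> {
    const contents = await readFile(path);
    const currentHash = createHash('sha256').update(contents).digest('hex');
    return currentHash === storedHash ? 'valid' : 'invalid';
}
```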
## The Integrity Report

When you view an email in the Open Archiver interface, an integrity report is automatically generated and displayed. This report provides a clear, at-a-glance status for the email file and each of its attachments.

### Statuses

- **Valid (Green Badge):** A "Valid" status means that the current signature of the file matches the original signature stored in the database. This is the expected status and indicates that the file's integrity is intact.

- **Invalid (Red Badge):** An "Invalid" status means that the current signature of the file does _not_ match the original signature. This indicates that the file's content has changed since it was archived.

### Reasons for an "Invalid" Status

If a file is marked as "Invalid," you can hover over the badge to see a reason for the failure. Common reasons include:

- **Stored hash does not match current hash:** This is the most common reason and indicates that the file's content has been modified. This could be due to accidental changes, data corruption, or unauthorized tampering.

- **Could not read attachment file from storage:** This message indicates that the file could not be read from its storage location. This could be due to a storage system issue, a file permission problem, or because the file has been deleted.

## What to Do If an Integrity Check Fails

If you encounter an "Invalid" status for an email or attachment, it is important to investigate the issue. Here are some steps you can take:

1. **Check Storage:** Verify that the file exists in its storage location and that its permissions are correct.
2. **Review Audit Logs:** If you have audit logging enabled, review the logs for any unauthorized access or modifications to the file.
3. **Restore from Backup:** If you suspect data corruption, you may need to restore the affected file from a backup.

The integrity check feature is a crucial tool for ensuring the long-term reliability and trustworthiness of your email archive. By regularly monitoring the integrity of your archived data, you can be confident that your records are accurate and complete.
32
docs/user-guides/settings/system.md
Normal file
@@ -0,0 +1,32 @@
# System Settings

System settings allow administrators to configure the global look and theme of the application. These settings apply to all users.

## Configuration

### Language

This setting determines the default display language for the application UI. The selected language will be used for all interface elements, including menus, labels, and messages.

> **Important:** When the language is changed, the backend (API) language will only change after a restart of the server. The frontend will update immediately.

Supported languages:

- English
- German
- French
- Estonian
- Spanish
- Italian
- Portuguese
- Dutch
- Greek
- Japanese

### Default Theme

This setting controls the default color theme for the application. Users can choose between light, dark, or the system default. The system default theme will sync with the user's operating system theme.

### Support Email

This setting allows administrators to provide a public-facing email address for user support inquiries. This email address may be displayed on error pages or in other areas where users may need to contact support.
75
docs/user-guides/troubleshooting/cors-errors.md
Normal file
@@ -0,0 +1,75 @@
# Troubleshooting CORS Errors

Cross-Origin Resource Sharing (CORS) is a security feature that controls how web applications in one domain can request and interact with resources in another. If it is not configured correctly, you may encounter errors when performing actions like uploading files.

This guide will help you diagnose and resolve common CORS-related issues.

## Symptoms

You may be experiencing a CORS issue if you see one of the following errors in your browser's developer console or in the application's logs:

- `TypeError: fetch failed`
- `Cross-Origin Request Blocked: The Same Origin Policy disallows reading the remote resource.`
- `Unexpected token 'C', "Cross-site"... is not valid JSON`
- A JSON error response similar to the following:
    ```json
    {
        "message": "CORS Error: This origin is not allowed.",
        "requiredOrigin": "http://localhost:3000",
        "receivedOrigin": "https://localhost:3000"
    }
    ```

## Root Cause

These errors typically occur when the URL you are using to access the application in your browser does not exactly match the `APP_URL` configured in your `.env` file.

This can happen for several reasons:

- You are accessing the application via a different port.
- You are using a reverse proxy that changes the protocol (e.g., from `http` to `https`).
- The SvelteKit server, in a production build, is incorrectly guessing its public-facing URL.
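Under the hood, the backend's origin check behaves roughly like this Express middleware sketch (a simplified assumption, not the exact implementation):

```typescript
import cors from 'cors';
import express from 'express';

const app = express();

// Only the exact APP_URL origin is allowed; a scheme or port mismatch
// (http vs https, :3000 vs :80) is rejected.
app.use(
    cors({
        origin: (origin, callback) => {
            if (!origin || origin === process.env.APP_URL) {
                return callback(null, true);
            }
            callback(new Error('CORS Error: This origin is not allowed.'));
        },
    })
);
```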
## Solution

The solution is to ensure that the application's frontend and backend are correctly configured with the public-facing URL of your instance. This is done by setting two environment variables: `APP_URL` and `ORIGIN`.

1. **Open your `.env` file** in a text editor.

2. **Set `APP_URL`**: Define the `APP_URL` variable with the exact URL you use to access the application in your browser.

    ```env
    APP_URL=http://your-domain-or-ip:3000
    ```

3. **Set `ORIGIN`**: The SvelteKit server requires a specific `ORIGIN` variable to correctly identify itself. This should always be set to the value of your `APP_URL`.

    ```env
    ORIGIN=$APP_URL
    ```

By using `$APP_URL`, you ensure that both variables are always in sync.

### Example Configuration

If you are running the application locally on port `3000`, your configuration should look like this:

```env
APP_URL=http://localhost:3000
ORIGIN=$APP_URL
```

If your application is behind a reverse proxy and is accessible at `https://archive.mycompany.com`, your configuration should be:

```env
APP_URL=https://archive.mycompany.com
ORIGIN=$APP_URL
```

After making these changes to your `.env` file, you must restart the application for them to take effect:

```bash
docker compose up -d --force-recreate
```

This will ensure that the backend's CORS policy and the frontend server's origin are correctly aligned, resolving the errors.
141
docs/user-guides/upgrade-and-migration/meilisearch-upgrade.md
Normal file
@@ -0,0 +1,141 @@
# Upgrading Meilisearch

Meilisearch, the search engine used by Open Archiver, requires a manual data migration process when upgrading to a new version. This is because Meilisearch databases are only compatible with the specific version that created them.

If an Open Archiver upgrade includes a major Meilisearch version change, you will need to migrate your search index by following the process below.

## Experimental: Dumpless Upgrade

> **Warning:** This feature is currently **experimental**. We do not recommend using it for production environments until it is marked as stable. Please use the [standard migration process](#standard-migration-process-recommended) instead. Proceed with caution.

Meilisearch recently introduced an experimental "dumpless" upgrade method. This allows you to migrate the database to a new Meilisearch version without manually creating and importing a dump. However, please note that **dumpless upgrades are not currently atomic**. If the process fails, your database may become corrupted, resulting in data loss.

**Prerequisite: Create a Snapshot**

Before attempting a dumpless upgrade, you **must** take a snapshot of your instance. This ensures you have a recovery point if the upgrade fails. Learn how to create snapshots in the [official Meilisearch documentation](https://www.meilisearch.com/docs/learn/data_backup/snapshots).

### How to Enable

To perform a dumpless upgrade, you need to configure your Meilisearch instance with the experimental flag. You can do this in one of two ways:

**Option 1: Using an Environment Variable**

Add the `MEILI_EXPERIMENTAL_DUMPLESS_UPGRADE` environment variable to your `docker-compose.yml` file for the Meilisearch service.

```yaml
services:
    meilisearch:
        image: getmeili/meilisearch:v1.x # The new version you want to upgrade to
        environment:
            - MEILI_MASTER_KEY=${MEILI_MASTER_KEY}
            - MEILI_EXPERIMENTAL_DUMPLESS_UPGRADE=true
```

**Option 2: Using a CLI Option**

Alternatively, you can pass the `--experimental-dumpless-upgrade` flag in the command section of your `docker-compose.yml`.

```yaml
services:
    meilisearch:
        image: getmeili/meilisearch:v1.x # The new version you want to upgrade to
        command: meilisearch --experimental-dumpless-upgrade
```

After updating your configuration, restart your container:

```bash
docker compose up -d
```

Meilisearch will attempt to migrate your database to the new version automatically.

---

## Standard Migration Process (Recommended)

For self-hosted instances using Docker Compose, the recommended migration process involves creating a data dump from your current Meilisearch instance, upgrading the Docker image, and then importing that dump into the new version.

### Step 1: Create a Dump

Before upgrading, you must create a dump of your existing Meilisearch data. You can do this by sending a POST request to the `/dumps` endpoint of the Meilisearch API.

1. **Find your Meilisearch container name**:

    ```bash
    docker compose ps
    ```

    Look for the service name that corresponds to Meilisearch, usually `meilisearch`.

2. **Execute the dump command**:
    You will need your Meilisearch Admin API key, which can be found in your `.env` file as `MEILI_MASTER_KEY`.

    ```bash
    curl -X POST 'http://localhost:7700/dumps' \
      -H "Authorization: Bearer YOUR_MEILI_MASTER_KEY"
    ```

    This will start the dump creation process. The dump file will be created inside the `meili_data` volume used by the Meilisearch container.

3. **Monitor the dump status**:
    The dump creation request returns a `taskUid`. You can use this to check the status of the dump.
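    For example, you can poll the Meilisearch tasks endpoint with the returned `taskUid` (replace the placeholder UID and key):

    ```bash
    curl 'http://localhost:7700/tasks/TASK_UID' \
      -H "Authorization: Bearer YOUR_MEILI_MASTER_KEY"
    ```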
    For more details on dump and import, see the [official Meilisearch documentation](https://www.meilisearch.com/docs/learn/update_and_migration/updating).

### Step 2: Upgrade Your Open Archiver Instance

Once the dump is successfully created, you can proceed with the standard Open Archiver upgrade process.

1. **Pull the latest changes and Docker images**:

    ```bash
    git pull
    docker compose pull
    ```

2. **Stop the running services**:

    ```bash
    docker compose down
    ```

### Step 3: Import the Dump

Now, you need to restart the services while telling Meilisearch to import from your dump file.

1. **Modify `docker-compose.yml`**:
    You need to temporarily add the `--import-dump` flag to the Meilisearch service command. Find the `meilisearch` service in your `docker-compose.yml` and modify the `command` section.

    You will need the name of your dump file. It will be a `.dump` file located in the directory mapped to `/meili_data` inside the container.

    ```yaml
    services:
        meilisearch:
            # ... other service config
            command:
                [
                    '--master-key=${MEILI_MASTER_KEY}',
                    '--env=production',
                    '--import-dump=/meili_data/dumps/YOUR_DUMP_FILE.dump',
                ]
    ```

2. **Restart the services**:

    ```bash
    docker compose up -d
    ```

    Meilisearch will now start and import the data from the dump file. This may take some time depending on the size of your index.

### Step 4: Clean Up

Once the import is complete and you have verified that your search is working correctly, you should remove the `--import-dump` flag from your `docker-compose.yml` to prevent it from running on every startup.

1. **Remove the `--import-dump` line** from the `command` section of the `meilisearch` service in `docker-compose.yml`.
2. **Restart the services** one last time:

    ```bash
    docker compose up -d
    ```

Your Meilisearch instance is now upgraded and running with your migrated data.

For more advanced scenarios or troubleshooting, please refer to the **[official Meilisearch migration guide](https://www.meilisearch.com/docs/learn/update_and_migration/updating)**.
42
docs/user-guides/upgrade-and-migration/upgrade.md
Normal file
@@ -0,0 +1,42 @@
# Upgrading Your Instance

This guide provides instructions for upgrading your Open Archiver instance to the latest version.

## Checking for New Versions

Open Archiver automatically checks for new versions and will display a notification in the footer of the web interface when an update is available. You can find a list of all releases and their release notes on the [GitHub Releases](https://github.com/LogicLabs-OU/OpenArchiver/releases) page.

## Upgrading Your Instance

To upgrade your Open Archiver instance, follow these steps:

1. **Pull the latest changes from the repository**:

    ```bash
    git pull
    ```

2. **Pull the latest Docker images**:

    ```bash
    docker compose pull
    ```

3. **Restart the services with the new images**:

    ```bash
    docker compose up -d
    ```

This will restart your Open Archiver instance with the latest version of the application.

## Migrating Data

When you upgrade to a new version, database migrations are applied automatically when the application starts up. This ensures that your database schema is always up to date with the latest version of the application.

No manual intervention is required for database migrations.

## Upgrading Meilisearch

When an Open Archiver update includes a major version change for Meilisearch, you will need to manually migrate your search data. This process is not covered by the standard upgrade commands.

For detailed instructions, please see the [Meilisearch Upgrade Guide](./meilisearch-upgrade.md).
79
open-archiver.yml
Normal file
@@ -0,0 +1,79 @@
# documentation: https://openarchiver.com
# slogan: A self-hosted, open-source email archiving solution with full-text search capability.
# tags: email archiving,email,compliance,search
# logo: svgs/openarchiver.svg
# port: 3000

services:
    open-archiver:
        image: logiclabshq/open-archiver:latest
        environment:
            - SERVICE_URL_3000
            - SERVICE_URL=${SERVICE_URL_3000}
            - PORT_BACKEND=${PORT_BACKEND:-4000}
            - PORT_FRONTEND=${PORT_FRONTEND:-3000}
            - NODE_ENV=${NODE_ENV:-production}
            - SYNC_FREQUENCY=${SYNC_FREQUENCY:-* * * * *}
            - POSTGRES_DB=${POSTGRES_DB:-open_archive}
            - POSTGRES_USER=${POSTGRES_USER:-admin}
            - POSTGRES_PASSWORD=${SERVICE_PASSWORD_POSTGRES}
            - DATABASE_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres:5432/${POSTGRES_DB}
            - MEILI_MASTER_KEY=${SERVICE_PASSWORD_MEILISEARCH}
            - MEILI_HOST=http://meilisearch:7700
            - REDIS_HOST=valkey
            - REDIS_PORT=6379
            - REDIS_USER=default
            - REDIS_PASSWORD=${SERVICE_PASSWORD_VALKEY}
            - REDIS_TLS_ENABLED=false
            - STORAGE_TYPE=${STORAGE_TYPE:-local}
            - STORAGE_LOCAL_ROOT_PATH=${STORAGE_LOCAL_ROOT_PATH:-/var/data/open-archiver}
            - BODY_SIZE_LIMIT=${BODY_SIZE_LIMIT:-100M}
            - STORAGE_S3_ENDPOINT=${STORAGE_S3_ENDPOINT}
            - STORAGE_S3_BUCKET=${STORAGE_S3_BUCKET}
            - STORAGE_S3_ACCESS_KEY_ID=${STORAGE_S3_ACCESS_KEY_ID}
            - STORAGE_S3_SECRET_ACCESS_KEY=${STORAGE_S3_SECRET_ACCESS_KEY}
            - STORAGE_S3_REGION=${STORAGE_S3_REGION}
            - STORAGE_S3_FORCE_PATH_STYLE=${STORAGE_S3_FORCE_PATH_STYLE:-false}
            - JWT_SECRET=${SERVICE_BASE64_128_JWT}
            - JWT_EXPIRES_IN=${JWT_EXPIRES_IN:-7d}
            - ENCRYPTION_KEY=${SERVICE_BASE64_64_ENCRYPTIONKEY}
            - RATE_LIMIT_WINDOW_MS=${RATE_LIMIT_WINDOW_MS:-60000}
            - RATE_LIMIT_MAX_REQUESTS=${RATE_LIMIT_MAX_REQUESTS:-100}
        volumes:
            - archiver-data:/var/data/open-archiver
        depends_on:
            postgres:
                condition: service_healthy
            valkey:
                condition: service_started
            meilisearch:
                condition: service_started

    postgres:
        image: postgres:17-alpine
        environment:
            - POSTGRES_DB=${POSTGRES_DB}
            - POSTGRES_USER=${POSTGRES_USER}
            - POSTGRES_PASSWORD=${SERVICE_PASSWORD_POSTGRES}
            - LC_ALL=C
        volumes:
            - pgdata:/var/lib/postgresql/data
        healthcheck:
            test: ['CMD-SHELL', 'pg_isready -U $${POSTGRES_USER} -d $${POSTGRES_DB}']
            interval: 10s
            timeout: 20s
            retries: 10

    valkey:
        image: valkey/valkey:8-alpine
        command: valkey-server --requirepass ${SERVICE_PASSWORD_VALKEY}
        volumes:
            - valkeydata:/data

    meilisearch:
        image: getmeili/meilisearch:v1.15
        environment:
            - MEILI_MASTER_KEY=${SERVICE_PASSWORD_MEILISEARCH}
            - MEILI_SCHEDULE_SNAPSHOT=86400
        volumes:
            - meilidata:/meili_data
17
package.json
@@ -1,16 +1,24 @@
{
    "name": "open-archiver",
    "version": "0.4.2",
    "private": true,
    "license": "SEE LICENSE IN LICENSE file",
    "scripts": {
        "dev": "dotenv -- pnpm --filter \"./packages/*\" --parallel dev",
        "build": "pnpm --filter \"./packages/*\" build",
        "start": "dotenv -- pnpm --filter \"./packages/*\" --parallel start",
        "build:oss": "pnpm --filter \"./packages/*\" --filter \"!./packages/enterprise\" --filter \"./apps/open-archiver\" build",
        "build:enterprise": "cross-env VITE_ENTERPRISE_MODE=true pnpm build",
        "start:oss": "dotenv -- concurrently \"node apps/open-archiver/dist/index.js\" \"pnpm --filter @open-archiver/frontend start\"",
        "start:enterprise": "dotenv -- concurrently \"node apps/open-archiver-enterprise/dist/index.js\" \"pnpm --filter @open-archiver/frontend start\"",
        "dev:enterprise": "cross-env VITE_ENTERPRISE_MODE=true dotenv -- pnpm --filter \"@open-archiver/*\" --filter \"open-archiver-enterprise-app\" --parallel dev",
        "dev:oss": "dotenv -- pnpm --filter \"./packages/*\" --filter \"!./packages/@open-archiver/enterprise\" --filter \"open-archiver-app\" --parallel dev",
        "build": "pnpm --filter \"./packages/*\" --filter \"./apps/*\" build",
        "start": "dotenv -- pnpm --filter \"open-archiver-app\" --parallel start",
        "start:workers": "dotenv -- concurrently \"pnpm --filter @open-archiver/backend start:ingestion-worker\" \"pnpm --filter @open-archiver/backend start:indexing-worker\" \"pnpm --filter @open-archiver/backend start:sync-scheduler\"",
        "start:workers:dev": "dotenv -- concurrently \"pnpm --filter @open-archiver/backend start:ingestion-worker:dev\" \"pnpm --filter @open-archiver/backend start:indexing-worker:dev\" \"pnpm --filter @open-archiver/backend start:sync-scheduler:dev\"",
        "db:generate": "dotenv -- pnpm --filter @open-archiver/backend db:generate",
        "db:migrate": "dotenv -- pnpm --filter @open-archiver/backend db:migrate",
        "db:migrate:dev": "dotenv -- pnpm --filter @open-archiver/backend db:migrate:dev",
        "docker-start": "concurrently \"pnpm start:workers\" \"pnpm start\"",
        "docker-start:oss": "concurrently \"pnpm start:workers\" \"pnpm start:oss\"",
        "docker-start:enterprise": "concurrently \"pnpm start:workers\" \"pnpm start:enterprise\"",
        "docs:dev": "vitepress dev docs --port 3009",
        "docs:build": "vitepress build docs",
        "docs:preview": "vitepress preview docs",
@@ -22,6 +30,7 @@
        "dotenv-cli": "8.0.0"
    },
    "devDependencies": {
        "cross-env": "^10.0.0",
        "prettier": "^3.6.2",
        "prettier-plugin-svelte": "^3.4.0",
        "prettier-plugin-tailwindcss": "^0.6.14",
@@ -2,11 +2,13 @@
    "name": "@open-archiver/backend",
    "version": "0.1.0",
    "private": true,
    "license": "SEE LICENSE IN LICENSE file",
    "main": "dist/index.js",
    "types": "dist/index.d.ts",
    "scripts": {
        "dev": "ts-node-dev --respawn --transpile-only src/index.ts ",
        "build": "tsc",
        "start": "node dist/index.js",
        "build": "tsc && pnpm copy-assets",
        "dev": "tsc --watch",
        "copy-assets": "cp -r src/locales dist/locales",
        "start:ingestion-worker": "node dist/workers/ingestion.worker.js",
        "start:indexing-worker": "node dist/workers/indexing.worker.js",
        "start:sync-scheduler": "node dist/jobs/schedulers/sync-scheduler.js",
@@ -30,6 +32,7 @@
        "bcryptjs": "^3.0.2",
        "bullmq": "^5.56.3",
        "busboy": "^1.6.0",
        "cors": "^2.8.5",
        "cross-fetch": "^4.1.0",
        "deepmerge-ts": "^7.1.5",
        "dotenv": "^17.2.0",
@@ -40,6 +43,9 @@
        "express-validator": "^7.2.1",
        "google-auth-library": "^10.1.0",
        "googleapis": "^152.0.0",
        "i18next": "^25.4.2",
        "i18next-fs-backend": "^2.6.0",
        "i18next-http-middleware": "^3.8.0",
        "imapflow": "^1.0.191",
        "jose": "^6.0.11",
        "mailparser": "^3.7.4",
@@ -54,23 +60,22 @@
        "pst-extractor": "^1.11.0",
        "reflect-metadata": "^0.2.2",
        "sqlite3": "^5.1.7",
        "tsconfig-paths": "^4.2.0",
        "xlsx": "^0.18.5",
        "yauzl": "^3.2.0"
        "xlsx": "https://cdn.sheetjs.com/xlsx-0.20.3/xlsx-0.20.3.tgz",
        "yauzl": "^3.2.0",
        "zod": "^4.1.5"
    },
    "devDependencies": {
        "@bull-board/api": "^6.11.0",
        "@bull-board/express": "^6.11.0",
        "@types/archiver": "^6.0.3",
        "@types/busboy": "^1.5.4",
        "@types/cors": "^2.8.19",
        "@types/express": "^5.0.3",
        "@types/mailparser": "^3.4.6",
        "@types/microsoft-graph": "^2.40.1",
        "@types/multer": "^2.0.0",
        "@types/node": "^24.0.12",
        "@types/yauzl": "^2.10.3",
        "bull-board": "^2.1.3",
        "ts-node-dev": "^2.0.0",
        "tsconfig-paths": "^4.2.0",
        "typescript": "^5.8.3"
    }
}
81
packages/backend/src/api/controllers/api-key.controller.ts
Normal file
@@ -0,0 +1,81 @@
import { Request, Response } from 'express';
import { ApiKeyService } from '../../services/ApiKeyService';
import { z } from 'zod';
import { UserService } from '../../services/UserService';
import { config } from '../../config';

const generateApiKeySchema = z.object({
    name: z
        .string()
        .min(1, 'API key name must be at least 1 character')
        .max(255, 'API key name must not be more than 255 characters'),
    expiresInDays: z
        .number()
        .int()
        .positive('Only positive numbers are allowed')
        .max(730, 'The API key must expire within 2 years / 730 days.'),
});
export class ApiKeyController {
    private userService = new UserService();
    public generateApiKey = async (req: Request, res: Response) => {
        try {
            if (config.app.isDemo) {
                return res.status(403).json({ message: req.t('errors.demoMode') });
            }
            const { name, expiresInDays } = generateApiKeySchema.parse(req.body);
            if (!req.user || !req.user.sub) {
                return res.status(401).json({ message: 'Unauthorized' });
            }
            const userId = req.user.sub;
            const actor = await this.userService.findById(userId);
            if (!actor) {
                return res.status(401).json({ message: 'Unauthorized' });
            }

            const key = await ApiKeyService.generate(
                userId,
                name,
                expiresInDays,
                actor,
                req.ip || 'unknown'
            );

            res.status(201).json({ key });
        } catch (error) {
            if (error instanceof z.ZodError) {
                return res
                    .status(400)
                    .json({ message: req.t('api.requestBodyInvalid'), errors: error.message });
            }
            res.status(500).json({ message: req.t('errors.internalServerError') });
        }
    };

    public getApiKeys = async (req: Request, res: Response) => {
        if (!req.user || !req.user.sub) {
            return res.status(401).json({ message: 'Unauthorized' });
        }
        const userId = req.user.sub;
        const keys = await ApiKeyService.getKeys(userId);

        res.status(200).json(keys);
    };

    public deleteApiKey = async (req: Request, res: Response) => {
        if (config.app.isDemo) {
            return res.status(403).json({ message: req.t('errors.demoMode') });
        }
        const { id } = req.params;
        if (!req.user || !req.user.sub) {
            return res.status(401).json({ message: 'Unauthorized' });
        }
        const userId = req.user.sub;
        const actor = await this.userService.findById(userId);
        if (!actor) {
            return res.status(401).json({ message: 'Unauthorized' });
        }
        await ApiKeyService.deleteKey(id, userId, actor, req.ip || 'unknown');

        // 204 No Content responses must not carry a body, so send without one.
        res.status(204).send();
    };
}
packages/backend/src/api/controllers/archived-email.controller.ts
@@ -1,8 +1,10 @@
 import { Request, Response } from 'express';
 import { ArchivedEmailService } from '../../services/ArchivedEmailService';
 import { config } from '../../config';
+import { UserService } from '../../services/UserService';
 import { checkDeletionEnabled } from '../../helpers/deletionGuard';

 export class ArchivedEmailController {
+	private userService = new UserService();
 	public getArchivedEmails = async (req: Request, res: Response): Promise<Response> => {
 		try {
 			const { ingestionSourceId } = req.params;
@@ -11,7 +13,7 @@ export class ArchivedEmailController {
 			const userId = req.user?.sub;

 			if (!userId) {
-				return res.status(401).json({ message: 'Unauthorized' });
+				return res.status(401).json({ message: req.t('errors.unauthorized') });
 			}

 			const result = await ArchivedEmailService.getArchivedEmails(
@@ -23,7 +25,7 @@ export class ArchivedEmailController {
 			return res.status(200).json(result);
 		} catch (error) {
 			console.error('Get archived emails error:', error);
-			return res.status(500).json({ message: 'An internal server error occurred' });
+			return res.status(500).json({ message: req.t('errors.internalServerError') });
 		}
 	};

@@ -33,37 +35,52 @@ export class ArchivedEmailController {
 			const userId = req.user?.sub;

 			if (!userId) {
-				return res.status(401).json({ message: 'Unauthorized' });
+				return res.status(401).json({ message: req.t('errors.unauthorized') });
 			}
+			const actor = await this.userService.findById(userId);
+			if (!actor) {
+				return res.status(401).json({ message: req.t('errors.unauthorized') });
+			}

-			const email = await ArchivedEmailService.getArchivedEmailById(id, userId);
+			const email = await ArchivedEmailService.getArchivedEmailById(
+				id,
+				userId,
+				actor,
+				req.ip || 'unknown'
+			);
 			if (!email) {
-				return res.status(404).json({ message: 'Archived email not found' });
+				return res.status(404).json({ message: req.t('archivedEmail.notFound') });
 			}
 			return res.status(200).json(email);
 		} catch (error) {
 			console.error(`Get archived email by id ${req.params.id} error:`, error);
-			return res.status(500).json({ message: 'An internal server error occurred' });
+			return res.status(500).json({ message: req.t('errors.internalServerError') });
 		}
 	};

 	public deleteArchivedEmail = async (req: Request, res: Response): Promise<Response> => {
 		if (config.app.isDemo) {
 			return res.status(403).json({ message: 'This operation is not allowed in demo mode.' });
 		}
 		try {
 			checkDeletionEnabled();
 			const { id } = req.params;
-			await ArchivedEmailService.deleteArchivedEmail(id);
+			const userId = req.user?.sub;
+			if (!userId) {
+				return res.status(401).json({ message: req.t('errors.unauthorized') });
+			}
+			const actor = await this.userService.findById(userId);
+			if (!actor) {
+				return res.status(401).json({ message: req.t('errors.unauthorized') });
+			}
+			await ArchivedEmailService.deleteArchivedEmail(id, actor, req.ip || 'unknown');
 			return res.status(204).send();
 		} catch (error) {
 			console.error(`Delete archived email ${req.params.id} error:`, error);
 			if (error instanceof Error) {
 				if (error.message === 'Archived email not found') {
-					return res.status(404).json({ message: error.message });
+					return res.status(404).json({ message: req.t('archivedEmail.notFound') });
 				}
 				return res.status(500).json({ message: error.message });
 			}
-			return res.status(500).json({ message: 'An internal server error occurred' });
+			return res.status(500).json({ message: req.t('errors.internalServerError') });
 		}
 	};
 }
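The same actor lookup (resolve req.user.sub to a full user record, return 401 otherwise) now repeats in every mutating handler so the service layer receives an audit actor plus the client IP. A sketch of a helper that could factor this out; `resolveActor` is hypothetical and not part of this changeset:

```ts
import type { Request } from 'express';
import { UserService } from '../../services/UserService';

// Hypothetical helper, not in the repository: resolve the authenticated
// user once so each handler only needs a single null check before passing
// the actor and client IP down for audit logging.
export async function resolveActor(req: Request) {
	if (!req.user?.sub) return null;
	return new UserService().findById(req.user.sub);
}
```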
packages/backend/src/api/controllers/auth.controller.ts
@@ -27,7 +27,7 @@ export class AuthController {
 		const { email, password, first_name, last_name } = req.body;

 		if (!email || !password || !first_name || !last_name) {
-			return res.status(400).json({ message: 'Email, password, and name are required' });
+			return res.status(400).json({ message: req.t('auth.setup.allFieldsRequired') });
 		}

 		try {
@@ -37,18 +37,18 @@ export class AuthController {
 			const userCount = Number(userCountResult[0].count);

 			if (userCount > 0) {
-				return res.status(403).json({ message: 'Setup has already been completed.' });
+				return res.status(403).json({ message: req.t('auth.setup.alreadyCompleted') });
 			}

 			const newUser = await this.#userService.createAdminUser(
 				{ email, password, first_name, last_name },
 				true
 			);
-			const result = await this.#authService.login(email, password);
+			const result = await this.#authService.login(email, password, req.ip || 'unknown');
 			return res.status(201).json(result);
 		} catch (error) {
 			console.error('Setup error:', error);
-			return res.status(500).json({ message: 'An internal server error occurred' });
+			return res.status(500).json({ message: req.t('errors.internalServerError') });
 		}
 	};

@@ -56,20 +56,20 @@ export class AuthController {
 		const { email, password } = req.body;

 		if (!email || !password) {
-			return res.status(400).json({ message: 'Email and password are required' });
+			return res.status(400).json({ message: req.t('auth.login.emailAndPasswordRequired') });
 		}

 		try {
-			const result = await this.#authService.login(email, password);
+			const result = await this.#authService.login(email, password, req.ip || 'unknown');

 			if (!result) {
-				return res.status(401).json({ message: 'Invalid credentials' });
+				return res.status(401).json({ message: req.t('auth.login.invalidCredentials') });
 			}

 			return res.status(200).json(result);
 		} catch (error) {
 			console.error('Login error:', error);
-			return res.status(500).json({ message: 'An internal server error occurred' });
+			return res.status(500).json({ message: req.t('errors.internalServerError') });
 		}
 	};

@@ -121,10 +121,10 @@ export class AuthController {
 			);
 			return res.status(200).json({ needsSetup: false });
 		}
-		return res.status(200).json({ needsSetupUser });
+		return res.status(200).json({ needsSetup: needsSetupUser });
 	} catch (error) {
 		console.error('Status check error:', error);
-		return res.status(500).json({ message: 'An internal server error occurred' });
+		return res.status(500).json({ message: req.t('errors.internalServerError') });
 	}
 };
 }
packages/backend/src/api/controllers/iam.controller.ts
@@ -3,7 +3,6 @@ import { IamService } from '../../services/IamService';
 import { PolicyValidator } from '../../iam-policy/policy-validator';
 import type { CaslPolicy } from '@open-archiver/types';
-import { logger } from '../../config/logger';
 import { config } from '../../config';

 export class IamController {
 	#iamService: IamService;
@@ -22,7 +21,7 @@ export class IamController {
 			}
 			res.status(200).json(roles);
 		} catch (error) {
-			res.status(500).json({ message: 'Failed to get roles.' });
+			res.status(500).json({ message: req.t('iam.failedToGetRoles') });
 		}
 	};

@@ -34,21 +33,18 @@ export class IamController {
 			if (role) {
 				res.status(200).json(role);
 			} else {
-				res.status(404).json({ message: 'Role not found.' });
+				res.status(404).json({ message: req.t('iam.roleNotFound') });
 			}
 		} catch (error) {
-			res.status(500).json({ message: 'Failed to get role.' });
+			res.status(500).json({ message: req.t('iam.failedToGetRole') });
 		}
 	};

 	public createRole = async (req: Request, res: Response) => {
 		if (config.app.isDemo) {
 			return res.status(403).json({ message: 'This operation is not allowed in demo mode.' });
 		}
 		const { name, policies } = req.body;

 		if (!name || !policies) {
-			res.status(400).json({ message: 'Missing required fields: name and policy.' });
+			res.status(400).json({ message: req.t('iam.missingRoleFields') });
 			return;
 		}

@@ -56,7 +52,7 @@ export class IamController {
 		for (const statement of policies) {
 			const { valid, reason } = PolicyValidator.isValid(statement as CaslPolicy);
 			if (!valid) {
-				res.status(400).json({ message: `Invalid policy statement: ${reason}` });
+				res.status(400).json({ message: `${req.t('iam.invalidPolicy')} ${reason}` });
 				return;
 			}
 		}
@@ -64,33 +60,27 @@ export class IamController {
 			res.status(201).json(role);
 		} catch (error) {
 			console.log(error);
-			res.status(500).json({ message: 'Failed to create role.' });
+			res.status(500).json({ message: req.t('iam.failedToCreateRole') });
 		}
 	};

 	public deleteRole = async (req: Request, res: Response) => {
 		if (config.app.isDemo) {
 			return res.status(403).json({ message: 'This operation is not allowed in demo mode.' });
 		}
 		const { id } = req.params;

 		try {
 			await this.#iamService.deleteRole(id);
 			res.status(204).send();
 		} catch (error) {
-			res.status(500).json({ message: 'Failed to delete role.' });
+			res.status(500).json({ message: req.t('iam.failedToDeleteRole') });
 		}
 	};

 	public updateRole = async (req: Request, res: Response) => {
 		if (config.app.isDemo) {
 			return res.status(403).json({ message: 'This operation is not allowed in demo mode.' });
 		}
 		const { id } = req.params;
 		const { name, policies } = req.body;

 		if (!name && !policies) {
-			res.status(400).json({ message: 'Missing fields to update: name or policies.' });
+			res.status(400).json({ message: req.t('iam.missingUpdateFields') });
 			return;
 		}

@@ -98,7 +88,7 @@ export class IamController {
 		for (const statement of policies) {
 			const { valid, reason } = PolicyValidator.isValid(statement as CaslPolicy);
 			if (!valid) {
-				res.status(400).json({ message: `Invalid policy statement: ${reason}` });
+				res.status(400).json({ message: `${req.t('iam.invalidPolicy')} ${reason}` });
 				return;
 			}
 		}
@@ -108,7 +98,7 @@ export class IamController {
 			const role = await this.#iamService.updateRole(id, { name, policies });
 			res.status(200).json(role);
 		} catch (error) {
-			res.status(500).json({ message: 'Failed to update role.' });
+			res.status(500).json({ message: req.t('iam.failedToUpdateRole') });
 		}
 	};
packages/backend/src/api/controllers/ingestion.controller.ts
@@ -7,9 +7,11 @@ import {
 	SafeIngestionSource,
 } from '@open-archiver/types';
 import { logger } from '../../config/logger';
 import { config } from '../../config';
+import { UserService } from '../../services/UserService';
 import { checkDeletionEnabled } from '../../helpers/deletionGuard';

 export class IngestionController {
+	private userService = new UserService();
 	/**
 	 * Converts an IngestionSource object to a safe version for client-side consumption
 	 * by removing the credentials.
@@ -22,24 +24,29 @@ export class IngestionController {
 	}

 	public create = async (req: Request, res: Response): Promise<Response> => {
 		if (config.app.isDemo) {
 			return res.status(403).json({ message: 'This operation is not allowed in demo mode.' });
 		}
 		try {
 			const dto: CreateIngestionSourceDto = req.body;
 			const userId = req.user?.sub;
 			if (!userId) {
-				return res.status(401).json({ message: 'Unauthorized' });
+				return res.status(401).json({ message: req.t('errors.unauthorized') });
 			}
-			const newSource = await IngestionService.create(dto, userId);
+			const actor = await this.userService.findById(userId);
+			if (!actor) {
+				return res.status(401).json({ message: req.t('errors.unauthorized') });
+			}
+			const newSource = await IngestionService.create(
+				dto,
+				userId,
+				actor,
+				req.ip || 'unknown'
+			);
 			const safeSource = this.toSafeIngestionSource(newSource);
 			return res.status(201).json(safeSource);
 		} catch (error: any) {
 			logger.error({ err: error }, 'Create ingestion source error');
 			// Return a 400 Bad Request for connection errors
 			return res.status(400).json({
-				message:
-					error.message || 'Failed to create ingestion source due to a connection error.',
+				message: error.message || req.t('ingestion.failedToCreate'),
 			});
 		}
 	};
@@ -48,14 +55,14 @@ export class IngestionController {
 		try {
 			const userId = req.user?.sub;
 			if (!userId) {
-				return res.status(401).json({ message: 'Unauthorized' });
+				return res.status(401).json({ message: req.t('errors.unauthorized') });
 			}
 			const sources = await IngestionService.findAll(userId);
 			const safeSources = sources.map(this.toSafeIngestionSource);
 			return res.status(200).json(safeSources);
 		} catch (error) {
 			console.error('Find all ingestion sources error:', error);
-			return res.status(500).json({ message: 'An internal server error occurred' });
+			return res.status(500).json({ message: req.t('errors.internalServerError') });
 		}
 	};

@@ -68,97 +75,127 @@ export class IngestionController {
 		} catch (error) {
 			console.error(`Find ingestion source by id ${req.params.id} error:`, error);
 			if (error instanceof Error && error.message === 'Ingestion source not found') {
-				return res.status(404).json({ message: error.message });
+				return res.status(404).json({ message: req.t('ingestion.notFound') });
 			}
-			return res.status(500).json({ message: 'An internal server error occurred' });
+			return res.status(500).json({ message: req.t('errors.internalServerError') });
 		}
 	};

 	public update = async (req: Request, res: Response): Promise<Response> => {
 		if (config.app.isDemo) {
 			return res.status(403).json({ message: 'This operation is not allowed in demo mode.' });
 		}
 		try {
 			const { id } = req.params;
 			const dto: UpdateIngestionSourceDto = req.body;
-			const updatedSource = await IngestionService.update(id, dto);
+			const userId = req.user?.sub;
+			if (!userId) {
+				return res.status(401).json({ message: req.t('errors.unauthorized') });
+			}
+			const actor = await this.userService.findById(userId);
+			if (!actor) {
+				return res.status(401).json({ message: req.t('errors.unauthorized') });
+			}
+			const updatedSource = await IngestionService.update(
+				id,
+				dto,
+				actor,
+				req.ip || 'unknown'
+			);
 			const safeSource = this.toSafeIngestionSource(updatedSource);
 			return res.status(200).json(safeSource);
 		} catch (error) {
 			console.error(`Update ingestion source ${req.params.id} error:`, error);
 			if (error instanceof Error && error.message === 'Ingestion source not found') {
-				return res.status(404).json({ message: error.message });
+				return res.status(404).json({ message: req.t('ingestion.notFound') });
 			}
-			return res.status(500).json({ message: 'An internal server error occurred' });
+			return res.status(500).json({ message: req.t('errors.internalServerError') });
 		}
 	};

 	public delete = async (req: Request, res: Response): Promise<Response> => {
 		if (config.app.isDemo) {
 			return res.status(403).json({ message: 'This operation is not allowed in demo mode.' });
 		}
 		try {
 			checkDeletionEnabled();
 			const { id } = req.params;
-			await IngestionService.delete(id);
+			const userId = req.user?.sub;
+			if (!userId) {
+				return res.status(401).json({ message: req.t('errors.unauthorized') });
+			}
+			const actor = await this.userService.findById(userId);
+			if (!actor) {
+				return res.status(401).json({ message: req.t('errors.unauthorized') });
+			}
+			await IngestionService.delete(id, actor, req.ip || 'unknown');
 			return res.status(204).send();
 		} catch (error) {
 			console.error(`Delete ingestion source ${req.params.id} error:`, error);
 			if (error instanceof Error && error.message === 'Ingestion source not found') {
-				return res.status(404).json({ message: error.message });
+				return res.status(404).json({ message: req.t('ingestion.notFound') });
 			} else if (error instanceof Error) {
 				return res.status(400).json({ message: error.message });
 			}
-			return res.status(500).json({ message: 'An internal server error occurred' });
+			return res.status(500).json({ message: req.t('errors.internalServerError') });
 		}
 	};

 	public triggerInitialImport = async (req: Request, res: Response): Promise<Response> => {
 		if (config.app.isDemo) {
 			return res.status(403).json({ message: 'This operation is not allowed in demo mode.' });
 		}
 		try {
 			const { id } = req.params;
 			await IngestionService.triggerInitialImport(id);
-			return res.status(202).json({ message: 'Initial import triggered successfully.' });
+			return res.status(202).json({ message: req.t('ingestion.initialImportTriggered') });
 		} catch (error) {
 			console.error(`Trigger initial import for ${req.params.id} error:`, error);
 			if (error instanceof Error && error.message === 'Ingestion source not found') {
-				return res.status(404).json({ message: error.message });
+				return res.status(404).json({ message: req.t('ingestion.notFound') });
 			}
-			return res.status(500).json({ message: 'An internal server error occurred' });
+			return res.status(500).json({ message: req.t('errors.internalServerError') });
 		}
 	};

 	public pause = async (req: Request, res: Response): Promise<Response> => {
 		if (config.app.isDemo) {
 			return res.status(403).json({ message: 'This operation is not allowed in demo mode.' });
 		}
 		try {
 			const { id } = req.params;
-			const updatedSource = await IngestionService.update(id, { status: 'paused' });
+			const userId = req.user?.sub;
+			if (!userId) {
+				return res.status(401).json({ message: req.t('errors.unauthorized') });
+			}
+			const actor = await this.userService.findById(userId);
+			if (!actor) {
+				return res.status(401).json({ message: req.t('errors.unauthorized') });
+			}
+			const updatedSource = await IngestionService.update(
+				id,
+				{ status: 'paused' },
+				actor,
+				req.ip || 'unknown'
+			);
 			const safeSource = this.toSafeIngestionSource(updatedSource);
 			return res.status(200).json(safeSource);
 		} catch (error) {
 			console.error(`Pause ingestion source ${req.params.id} error:`, error);
 			if (error instanceof Error && error.message === 'Ingestion source not found') {
-				return res.status(404).json({ message: error.message });
+				return res.status(404).json({ message: req.t('ingestion.notFound') });
 			}
-			return res.status(500).json({ message: 'An internal server error occurred' });
+			return res.status(500).json({ message: req.t('errors.internalServerError') });
 		}
 	};

 	public triggerForceSync = async (req: Request, res: Response): Promise<Response> => {
 		if (config.app.isDemo) {
 			return res.status(403).json({ message: 'This operation is not allowed in demo mode.' });
 		}
 		try {
 			const { id } = req.params;
-			await IngestionService.triggerForceSync(id);
-			return res.status(202).json({ message: 'Force sync triggered successfully.' });
+			const userId = req.user?.sub;
+			if (!userId) {
+				return res.status(401).json({ message: req.t('errors.unauthorized') });
+			}
+			const actor = await this.userService.findById(userId);
+			if (!actor) {
+				return res.status(401).json({ message: req.t('errors.unauthorized') });
+			}
+			await IngestionService.triggerForceSync(id, actor, req.ip || 'unknown');
+			return res.status(202).json({ message: req.t('ingestion.forceSyncTriggered') });
 		} catch (error) {
 			console.error(`Trigger force sync for ${req.params.id} error:`, error);
 			if (error instanceof Error && error.message === 'Ingestion source not found') {
-				return res.status(404).json({ message: error.message });
+				return res.status(404).json({ message: req.t('ingestion.notFound') });
 			}
-			return res.status(500).json({ message: 'An internal server error occurred' });
+			return res.status(500).json({ message: req.t('errors.internalServerError') });
 		}
 	};
 }
packages/backend/src/api/controllers/integrity.controller.ts (new file, 29 lines)
@@ -0,0 +1,29 @@
import { Request, Response } from 'express';
import { IntegrityService } from '../../services/IntegrityService';
import { z } from 'zod';

const checkIntegritySchema = z.object({
	id: z.string().uuid(),
});

export class IntegrityController {
	private integrityService = new IntegrityService();

	public checkIntegrity = async (req: Request, res: Response) => {
		try {
			const { id } = checkIntegritySchema.parse(req.params);
			const results = await this.integrityService.checkEmailIntegrity(id);
			res.status(200).json(results);
		} catch (error) {
			if (error instanceof z.ZodError) {
				return res
					.status(400)
					.json({ message: req.t('api.requestBodyInvalid'), errors: error.message });
			}
			if (error instanceof Error && error.message === 'Archived email not found') {
				return res.status(404).json({ message: req.t('errors.notFound') });
			}
			res.status(500).json({ message: req.t('errors.internalServerError') });
		}
	};
}
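Judging from the integrity route registered later in this diff (GET /v1/integrity/:id, behind requireAuth and a read permission on archive), a client call could look like the sketch below; the base URL, token, and email ID are placeholder values:

```ts
// Minimal sketch: ask the backend to verify one archived email's integrity.
const BASE_URL = 'http://localhost:4000'; // placeholder
const TOKEN = 'your-jwt-or-api-key'; // placeholder
const emailId = '00000000-0000-0000-0000-000000000000'; // must be a UUID, per the zod schema

const res = await fetch(`${BASE_URL}/v1/integrity/${emailId}`, {
	headers: { Authorization: `Bearer ${TOKEN}` },
});
console.log(res.status, await res.json()); // 200 with check results, 404 if unknown
```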
packages/backend/src/api/controllers/jobs.controller.ts (new file, 42 lines)
@@ -0,0 +1,42 @@
import { Request, Response } from 'express';
import { JobsService } from '../../services/JobsService';
import {
	IGetQueueJobsRequestParams,
	IGetQueueJobsRequestQuery,
	JobStatus,
} from '@open-archiver/types';

export class JobsController {
	private jobsService: JobsService;

	constructor() {
		this.jobsService = new JobsService();
	}

	public getQueues = async (req: Request, res: Response) => {
		try {
			const queues = await this.jobsService.getQueues();
			res.status(200).json({ queues });
		} catch (error) {
			res.status(500).json({ message: 'Error fetching queues', error });
		}
	};

	public getQueueJobs = async (req: Request, res: Response) => {
		try {
			const { queueName } = req.params as unknown as IGetQueueJobsRequestParams;
			const { status, page, limit } = req.query as unknown as IGetQueueJobsRequestQuery;
			const pageNumber = parseInt(page, 10) || 1;
			const limitNumber = parseInt(limit, 10) || 10;
			const queueDetails = await this.jobsService.getQueueDetails(
				queueName,
				status,
				pageNumber,
				limitNumber
			);
			res.status(200).json(queueDetails);
		} catch (error) {
			res.status(500).json({ message: 'Error fetching queue jobs', error });
		}
	};
}
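Together with the jobs routes added further down (mounted under /v1/jobs and restricted to manage-all), the queue endpoint can be exercised roughly as follows; the queue name and status value are illustrative, since the full JobStatus union is not shown in this diff, and BASE_URL/TOKEN are placeholders as before:

```ts
// Illustrative sketch: page through jobs of one queue (super-admin only).
const url = new URL(`${BASE_URL}/v1/jobs/queues/ingestion`); // queue name assumed
url.searchParams.set('status', 'failed'); // assumed to be a valid JobStatus value
url.searchParams.set('page', '1');
url.searchParams.set('limit', '10');
const res = await fetch(url, { headers: { Authorization: `Bearer ${TOKEN}` } });
const queueDetails = await res.json();
```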
packages/backend/src/api/controllers/search.controller.ts
@@ -15,12 +15,12 @@ export class SearchController {
 		const userId = req.user?.sub;

 		if (!userId) {
-			res.status(401).json({ message: 'Unauthorized' });
+			res.status(401).json({ message: req.t('errors.unauthorized') });
 			return;
 		}

 		if (!keywords) {
-			res.status(400).json({ message: 'Keywords are required' });
+			res.status(400).json({ message: req.t('search.keywordsRequired') });
 			return;
 		}

@@ -31,12 +31,13 @@ export class SearchController {
 				limit: limit ? parseInt(limit as string) : 10,
 				matchingStrategy: matchingStrategy as MatchingStrategies,
 			},
-			userId
+			userId,
+			req.ip || 'unknown'
 		);

 		res.status(200).json(results);
 	} catch (error) {
-		const message = error instanceof Error ? error.message : 'An unknown error occurred';
+		const message = error instanceof Error ? error.message : req.t('errors.unknown');
 		res.status(500).json({ message });
 	}
 };
packages/backend/src/api/controllers/settings.controller.ts (new file, 38 lines)
@@ -0,0 +1,38 @@
import type { Request, Response } from 'express';
import { SettingsService } from '../../services/SettingsService';
import { UserService } from '../../services/UserService';

const settingsService = new SettingsService();
const userService = new UserService();

export const getSystemSettings = async (req: Request, res: Response) => {
	try {
		const settings = await settingsService.getSystemSettings();
		res.status(200).json(settings);
	} catch (error) {
		// A more specific error could be logged here
		res.status(500).json({ message: req.t('settings.failedToRetrieve') });
	}
};

export const updateSystemSettings = async (req: Request, res: Response) => {
	try {
		// Basic validation can be performed here if necessary
		if (!req.user || !req.user.sub) {
			return res.status(401).json({ message: 'Unauthorized' });
		}
		const actor = await userService.findById(req.user.sub);
		if (!actor) {
			return res.status(401).json({ message: 'Unauthorized' });
		}
		const updatedSettings = await settingsService.updateSystemSettings(
			req.body,
			actor,
			req.ip || 'unknown'
		);
		res.status(200).json(updatedSettings);
	} catch (error) {
		// A more specific error could be logged here
		res.status(500).json({ message: req.t('settings.failedToUpdate') });
	}
};
packages/backend/src/api/controllers/storage.controller.ts
@@ -10,7 +10,7 @@ export class StorageController {
 		const unsafePath = req.query.path as string;

 		if (!unsafePath) {
-			res.status(400).send('File path is required');
+			res.status(400).send(req.t('storage.filePathRequired'));
 			return;
 		}

@@ -24,7 +24,7 @@ export class StorageController {
 		const fullPath = path.join(basePath, normalizedPath);

 		if (!fullPath.startsWith(basePath)) {
-			res.status(400).send('Invalid file path');
+			res.status(400).send(req.t('storage.invalidFilePath'));
 			return;
 		}

@@ -34,7 +34,7 @@ export class StorageController {
 		try {
 			const fileExists = await this.storageService.exists(safePath);
 			if (!fileExists) {
-				res.status(404).send('File not found');
+				res.status(404).send(req.t('storage.fileNotFound'));
 				return;
 			}

@@ -44,7 +44,7 @@ export class StorageController {
 			fileStream.pipe(res);
 		} catch (error) {
 			console.error('Error downloading file:', error);
-			res.status(500).send('Error downloading file');
+			res.status(500).send(req.t('storage.downloadError'));
 		}
 	};
 }
@@ -7,6 +7,7 @@ import { config } from '../../config/index';
 export const uploadFile = async (req: Request, res: Response) => {
 	const storage = new StorageService();
 	const bb = busboy({ headers: req.headers });
+	const uploads: Promise<void>[] = [];
 	let filePath = '';
 	let originalFilename = '';

@@ -14,10 +15,11 @@ export const uploadFile = async (req: Request, res: Response) => {
 		originalFilename = filename.filename;
 		const uuid = randomUUID();
 		filePath = `${config.storage.openArchiverFolderName}/tmp/${uuid}-${originalFilename}`;
-		storage.put(filePath, file);
+		uploads.push(storage.put(filePath, file));
 	});

-	bb.on('finish', () => {
+	bb.on('finish', async () => {
+		await Promise.all(uploads);
 		res.json({ filePath });
 	});
@@ -15,52 +15,131 @@ export const getUsers = async (req: Request, res: Response) => {
 export const getUser = async (req: Request, res: Response) => {
 	const user = await userService.findById(req.params.id);
 	if (!user) {
-		return res.status(404).json({ message: 'User not found' });
+		return res.status(404).json({ message: req.t('user.notFound') });
 	}
 	res.json(user);
 };

 export const createUser = async (req: Request, res: Response) => {
 	if (config.app.isDemo) {
 		return res.status(403).json({ message: 'This operation is not allowed in demo mode.' });
 	}
 	const { email, first_name, last_name, password, roleId } = req.body;
+	if (!req.user || !req.user.sub) {
+		return res.status(401).json({ message: 'Unauthorized' });
+	}
+	const actor = await userService.findById(req.user.sub);
+	if (!actor) {
+		return res.status(401).json({ message: 'Unauthorized' });
+	}

 	const newUser = await userService.createUser(
 		{ email, first_name, last_name, password },
-		roleId
+		roleId,
+		actor,
+		req.ip || 'unknown'
 	);
 	res.status(201).json(newUser);
 };

 export const updateUser = async (req: Request, res: Response) => {
 	if (config.app.isDemo) {
 		return res.status(403).json({ message: 'This operation is not allowed in demo mode.' });
 	}
 	const { email, first_name, last_name, roleId } = req.body;
+	if (!req.user || !req.user.sub) {
+		return res.status(401).json({ message: 'Unauthorized' });
+	}
+	const actor = await userService.findById(req.user.sub);
+	if (!actor) {
+		return res.status(401).json({ message: 'Unauthorized' });
+	}
 	const updatedUser = await userService.updateUser(
 		req.params.id,
 		{ email, first_name, last_name },
-		roleId
+		roleId,
+		actor,
+		req.ip || 'unknown'
 	);
 	if (!updatedUser) {
-		return res.status(404).json({ message: 'User not found' });
+		return res.status(404).json({ message: req.t('user.notFound') });
 	}
 	res.json(updatedUser);
 };

 export const deleteUser = async (req: Request, res: Response) => {
 	if (config.app.isDemo) {
 		return res.status(403).json({ message: 'This operation is not allowed in demo mode.' });
 	}
 	const userCountResult = await db.select({ count: sql<number>`count(*)` }).from(schema.users);
 	console.log('iusercount,', userCountResult[0].count);

 	const isOnlyUser = Number(userCountResult[0].count) === 1;
 	if (isOnlyUser) {
 		return res.status(400).json({
-			message: 'You are trying to delete the only user in the database, this is not allowed.',
+			message: req.t('user.cannotDeleteOnlyUser'),
 		});
 	}
-	await userService.deleteUser(req.params.id);
+	if (!req.user || !req.user.sub) {
+		return res.status(401).json({ message: 'Unauthorized' });
+	}
+	const actor = await userService.findById(req.user.sub);
+	if (!actor) {
+		return res.status(401).json({ message: 'Unauthorized' });
+	}
+	await userService.deleteUser(req.params.id, actor, req.ip || 'unknown');
 	res.status(204).send();
 };

+export const getProfile = async (req: Request, res: Response) => {
+	if (!req.user || !req.user.sub) {
+		return res.status(401).json({ message: 'Unauthorized' });
+	}
+	const user = await userService.findById(req.user.sub);
+	if (!user) {
+		return res.status(404).json({ message: req.t('user.notFound') });
+	}
+	res.json(user);
+};
+
+export const updateProfile = async (req: Request, res: Response) => {
+	if (config.app.isDemo) {
+		return res.status(403).json({ message: req.t('errors.demoMode') });
+	}
+	const { email, first_name, last_name } = req.body;
+	if (!req.user || !req.user.sub) {
+		return res.status(401).json({ message: 'Unauthorized' });
+	}
+	const actor = await userService.findById(req.user.sub);
+	if (!actor) {
+		return res.status(401).json({ message: 'Unauthorized' });
+	}
+	const updatedUser = await userService.updateUser(
+		req.user.sub,
+		{ email, first_name, last_name },
+		undefined,
+		actor,
+		req.ip || 'unknown'
+	);
+	res.json(updatedUser);
+};
+
+export const updatePassword = async (req: Request, res: Response) => {
+	if (config.app.isDemo) {
+		return res.status(403).json({ message: req.t('errors.demoMode') });
+	}
+	const { currentPassword, newPassword } = req.body;
+	if (!req.user || !req.user.sub) {
+		return res.status(401).json({ message: 'Unauthorized' });
+	}
+	const actor = await userService.findById(req.user.sub);
+	if (!actor) {
+		return res.status(401).json({ message: 'Unauthorized' });
+	}
+
+	try {
+		await userService.updatePassword(
+			req.user.sub,
+			currentPassword,
+			newPassword,
+			actor,
+			req.ip || 'unknown'
+		);
+		res.status(200).json({ message: 'Password updated successfully' });
+	} catch (e: any) {
+		if (e.message === 'Invalid current password') {
+			return res.status(400).json({ message: e.message });
+		}
+		throw e;
+	}
+};
packages/backend/src/api/middleware/rateLimiter.ts
@@ -1,10 +1,21 @@
-import rateLimit from 'express-rate-limit';
+import { rateLimit, ipKeyGenerator } from 'express-rate-limit';
+import { config } from '../../config';

-// Rate limiter to prevent brute-force attacks on the login endpoint
-export const loginRateLimiter = rateLimit({
-	windowMs: 15 * 60 * 1000, // 15 minutes
-	max: 10, // Limit each IP to 10 login requests per windowMs
-	message: 'Too many login attempts from this IP, please try again after 15 minutes',
-	standardHeaders: true, // Return rate limit info in the `RateLimit-*` headers
-	legacyHeaders: false, // Disable the `X-RateLimit-*` headers
-});
+const windowInMinutes = Math.ceil(config.api.rateLimit.windowMs / 60000);
+
+export const rateLimiter = rateLimit({
+	windowMs: config.api.rateLimit.windowMs,
+	max: config.api.rateLimit.max,
+	keyGenerator: (req, res) => {
+		// Use the real IP address of the client, even if it's behind a proxy.
+		// This requires `app.set('trust proxy', true)` in `server.ts`.
+		return ipKeyGenerator(req.ip || 'unknown');
+	},
+	message: {
+		status: 429,
+		message: `Too many requests from this IP, please try again after ${windowInMinutes} minutes`,
+	},
+	statusCode: 429,
+	standardHeaders: true,
+	legacyHeaders: false,
+});
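Because the limiter keys on req.ip, it only distinguishes clients when Express is told to trust proxy headers, which server.ts below does. A minimal mounting sketch (import path assumed relative to packages/backend/src):

```ts
import express from 'express';
import { rateLimiter } from './api/middleware/rateLimiter';

const app = express();
// Without this, req.ip is the proxy's address and every client behind the
// proxy would share a single rate-limit bucket.
app.set('trust proxy', true);
app.use(rateLimiter);
```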
packages/backend/src/api/middleware/requireAuth.ts
@@ -2,6 +2,9 @@ import type { Request, Response, NextFunction } from 'express';
 import type { AuthService } from '../../services/AuthService';
 import type { AuthTokenPayload } from '@open-archiver/types';
 import 'dotenv/config';
+import { ApiKeyService } from '../../services/ApiKeyService';
+import { UserService } from '../../services/UserService';

 // By using module augmentation, we can add our custom 'user' property
 // to the Express Request interface in a type-safe way.
 declare global {
@@ -15,16 +18,30 @@ declare global {
 export const requireAuth = (authService: AuthService) => {
 	return async (req: Request, res: Response, next: NextFunction) => {
 		const authHeader = req.headers.authorization;
+		const apiKeyHeader = req.headers['x-api-key'];

+		if (apiKeyHeader) {
+			const userId = await ApiKeyService.validateKey(apiKeyHeader as string);
+			if (!userId) {
+				return res.status(401).json({ message: 'Unauthorized: Invalid API key' });
+			}
+			const user = await new UserService().findById(userId);
+			if (!user) {
+				return res.status(401).json({ message: 'Unauthorized: Invalid user' });
+			}
+			req.user = {
+				sub: user.id,
+				email: user.email,
+				roles: user.role ? [user.role.name] : [],
+			};
+			return next();
+		}

 		if (!authHeader || !authHeader.startsWith('Bearer ')) {
 			return res.status(401).json({ message: 'Unauthorized: No token provided' });
 		}
 		const token = authHeader.split(' ')[1];
 		try {
 			// Accept a SUPER_API_KEY for all authentications; the process.env.SUPER_API_KEY check guards the case where the user didn't set one.
 			if (process.env.SUPER_API_KEY && token === process.env.SUPER_API_KEY) {
 				next();
 				return;
 			}
 			const payload = await authService.verifyToken(token);
 			if (!payload) {
 				return res.status(401).json({ message: 'Unauthorized: Invalid token' });
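With this middleware, a request can authenticate either with a bearer JWT or with the new X-API-Key header, and the API-key branch is checked first. A hedged client sketch (URL and credentials are placeholders):

```ts
// Both of these hit the same protected endpoint; X-API-Key takes precedence
// over the Authorization header in the middleware above.
const withJwt = await fetch(`${BASE_URL}/v1/ingestion-sources`, {
	headers: { Authorization: `Bearer ${TOKEN}` },
});
const withApiKey = await fetch(`${BASE_URL}/v1/ingestion-sources`, {
	headers: { 'X-API-Key': API_KEY },
});
```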
packages/backend/src/api/middleware/requirePermission.ts
@@ -25,9 +25,11 @@ export const requirePermission = (
 	);

 	if (!hasPermission) {
+		const message = rejectMessage
+			? req.t(rejectMessage)
+			: req.t('errors.noPermissionToAction');
 		return res.status(403).json({
-			message:
-				rejectMessage || `You don't have the permission to perform the current action.`,
+			message,
 		});
 	}
packages/backend/src/api/routes/api-key.routes.ts (new file, 15 lines)
@@ -0,0 +1,15 @@
import { Router } from 'express';
import { ApiKeyController } from '../controllers/api-key.controller';
import { requireAuth } from '../middleware/requireAuth';
import { AuthService } from '../../services/AuthService';

export const apiKeyRoutes = (authService: AuthService): Router => {
	const router = Router();
	const controller = new ApiKeyController();

	router.post('/', requireAuth(authService), controller.generateApiKey);
	router.get('/', requireAuth(authService), controller.getApiKeys);
	router.delete('/:id', requireAuth(authService), controller.deleteApiKey);

	return router;
};
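Combining these routes with the zod schema in the controller above, creating a key could look like this sketch; the name and lifetime are example values, and the response shape ({ key }) follows the controller:

```ts
const res = await fetch(`${BASE_URL}/v1/api-keys`, {
	method: 'POST',
	headers: {
		Authorization: `Bearer ${TOKEN}`,
		'Content-Type': 'application/json',
	},
	// name: 1-255 characters; expiresInDays: positive integer, at most 730
	body: JSON.stringify({ name: 'ci-exporter', expiresInDays: 90 }),
});
const { key } = await res.json(); // 201 Created on success
```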
packages/backend/src/api/routes/auth.routes.ts
@@ -1,5 +1,4 @@
 import { Router } from 'express';
-import { loginRateLimiter } from '../middleware/rateLimiter';
 import type { AuthController } from '../controllers/auth.controller';

 export const createAuthRouter = (authController: AuthController): Router => {
@@ -10,14 +9,14 @@ export const createAuthRouter = (authController: AuthController): Router => {
 	 * @description Creates the initial administrator user.
 	 * @access Public
 	 */
-	router.post('/setup', loginRateLimiter, authController.setup);
+	router.post('/setup', authController.setup);

 	/**
 	 * @route POST /api/v1/auth/login
 	 * @description Authenticates a user and returns a JWT.
 	 * @access Public
 	 */
-	router.post('/login', loginRateLimiter, authController.login);
+	router.post('/login', authController.login);

 	/**
 	 * @route GET /api/v1/auth/status
packages/backend/src/api/routes/dashboard.routes.ts
@@ -11,47 +11,27 @@ export const createDashboardRouter = (authService: AuthService): Router => {

 	router.get(
 		'/stats',
-		requirePermission(
-			'read',
-			'dashboard',
-			'You need the dashboard read permission to view dashboard stats.'
-		),
+		requirePermission('read', 'dashboard', 'dashboard.permissionRequired'),
 		dashboardController.getStats
 	);
 	router.get(
 		'/ingestion-history',
-		requirePermission(
-			'read',
-			'dashboard',
-			'You need the dashboard read permission to view dashboard data.'
-		),
+		requirePermission('read', 'dashboard', 'dashboard.permissionRequired'),
 		dashboardController.getIngestionHistory
 	);
 	router.get(
 		'/ingestion-sources',
-		requirePermission(
-			'read',
-			'dashboard',
-			'You need the dashboard read permission to view dashboard data.'
-		),
+		requirePermission('read', 'dashboard', 'dashboard.permissionRequired'),
 		dashboardController.getIngestionSources
 	);
 	router.get(
 		'/recent-syncs',
-		requirePermission(
-			'read',
-			'dashboard',
-			'You need the dashboard read permission to view dashboard data.'
-		),
+		requirePermission('read', 'dashboard', 'dashboard.permissionRequired'),
 		dashboardController.getRecentSyncs
 	);
 	router.get(
 		'/indexed-insights',
-		requirePermission(
-			'read',
-			'dashboard',
-			'You need the dashboard read permission to view dashboard data.'
-		),
+		requirePermission('read', 'dashboard', 'dashboard.permissionRequired'),
 		dashboardController.getIndexedInsights
 	);
packages/backend/src/api/routes/iam.routes.ts
@@ -23,19 +23,19 @@ export const createIamRouter = (iamController: IamController, authService: AuthService) => {
 	 */
 	router.post(
 		'/roles',
-		requirePermission('manage', 'all', 'Super Admin role is required to manage roles.'),
+		requirePermission('manage', 'all', 'iam.requiresSuperAdminRole'),
 		iamController.createRole
 	);

 	router.delete(
 		'/roles/:id',
-		requirePermission('manage', 'all', 'Super Admin role is required to manage roles.'),
+		requirePermission('manage', 'all', 'iam.requiresSuperAdminRole'),
 		iamController.deleteRole
 	);

 	router.put(
 		'/roles/:id',
-		requirePermission('manage', 'all', 'Super Admin role is required to manage roles.'),
+		requirePermission('manage', 'all', 'iam.requiresSuperAdminRole'),
 		iamController.updateRole
 	);
 	return router;
packages/backend/src/api/routes/integrity.routes.ts (new file, 16 lines)
@@ -0,0 +1,16 @@
import { Router } from 'express';
import { IntegrityController } from '../controllers/integrity.controller';
import { requireAuth } from '../middleware/requireAuth';
import { requirePermission } from '../middleware/requirePermission';
import { AuthService } from '../../services/AuthService';

export const integrityRoutes = (authService: AuthService): Router => {
	const router = Router();
	const controller = new IntegrityController();

	router.use(requireAuth(authService));

	router.get('/:id', requirePermission('read', 'archive'), controller.checkIntegrity);

	return router;
};
packages/backend/src/api/routes/jobs.routes.ts (new file, 25 lines)
@@ -0,0 +1,25 @@
import { Router } from 'express';
import { JobsController } from '../controllers/jobs.controller';
import { requireAuth } from '../middleware/requireAuth';
import { requirePermission } from '../middleware/requirePermission';
import { AuthService } from '../../services/AuthService';

export const createJobsRouter = (authService: AuthService): Router => {
	const router = Router();
	const jobsController = new JobsController();

	router.use(requireAuth(authService));

	router.get(
		'/queues',
		requirePermission('manage', 'all', 'user.requiresSuperAdminRole'),
		jobsController.getQueues
	);
	router.get(
		'/queues/:queueName',
		requirePermission('manage', 'all', 'user.requiresSuperAdminRole'),
		jobsController.getQueueJobs
	);

	return router;
};
packages/backend/src/api/routes/settings.routes.ts (new file, 25 lines)
@@ -0,0 +1,25 @@
import { Router } from 'express';
import * as settingsController from '../controllers/settings.controller';
import { requireAuth } from '../middleware/requireAuth';
import { requirePermission } from '../middleware/requirePermission';
import { AuthService } from '../../services/AuthService';

export const createSettingsRouter = (authService: AuthService): Router => {
	const router = Router();

	// Public route for non-sensitive settings. Reads are deliberately not permission-scoped,
	// because every end user needs this data in the frontend. Sensitive settings must be
	// gated behind a dedicated permission subject, so this route should only ever expose
	// non-sensitive settings data.
	/**
	 * @returns SystemSettings
	 */
	router.get('/system', settingsController.getSystemSettings);

	// Protected route to update settings
	router.put(
		'/system',
		requireAuth(authService),
		requirePermission('manage', 'settings', 'settings.noPermissionToUpdate'),
		settingsController.updateSystemSettings
	);

	return router;
};
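Since GET /v1/settings/system is intentionally unauthenticated (and, per server.ts below, also excluded from the rate limiter), the frontend can read it before login; a minimal sketch:

```ts
// No Authorization header required; returns only non-sensitive SystemSettings,
// e.g. the default language that server.ts uses to initialize i18next.
const settings = await fetch(`${BASE_URL}/v1/settings/system`).then((r) => r.json());
```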
@@ -1,6 +0,0 @@
-import { Router } from 'express';
-import { ingestionQueue } from '../../jobs/queues';
-
-const router: Router = Router();
-
-export default router;
packages/backend/src/api/routes/user.routes.ts
@@ -11,6 +11,10 @@ export const createUserRouter = (authService: AuthService): Router => {

 	router.get('/', requirePermission('read', 'users'), userController.getUsers);

+	router.get('/profile', userController.getProfile);
+	router.patch('/profile', userController.updateProfile);
+	router.post('/profile/password', userController.updatePassword);
+
 	router.get('/:id', requirePermission('read', 'users'), userController.getUser);

 	/**
@@ -18,19 +22,19 @@ export const createUserRouter = (authService: AuthService): Router => {
 	 */
 	router.post(
 		'/',
-		requirePermission('manage', 'all', 'Super Admin role is required to manage users.'),
+		requirePermission('manage', 'all', 'user.requiresSuperAdminRole'),
 		userController.createUser
 	);

 	router.put(
 		'/:id',
-		requirePermission('manage', 'all', 'Super Admin role is required to manage users.'),
+		requirePermission('manage', 'all', 'user.requiresSuperAdminRole'),
 		userController.updateUser
 	);

 	router.delete(
 		'/:id',
-		requirePermission('manage', 'all', 'Super Admin role is required to manage users.'),
+		requirePermission('manage', 'all', 'user.requiresSuperAdminRole'),
 		userController.deleteUser
 	);
packages/backend/src/api/server.ts (new file, 170 lines)
@@ -0,0 +1,170 @@
import express, { Express } from 'express';
import cors from 'cors';
import dotenv from 'dotenv';
import { AuthController } from './controllers/auth.controller';
import { IngestionController } from './controllers/ingestion.controller';
import { ArchivedEmailController } from './controllers/archived-email.controller';
import { StorageController } from './controllers/storage.controller';
import { SearchController } from './controllers/search.controller';
import { IamController } from './controllers/iam.controller';
import { createAuthRouter } from './routes/auth.routes';
import { createIamRouter } from './routes/iam.routes';
import { createIngestionRouter } from './routes/ingestion.routes';
import { createArchivedEmailRouter } from './routes/archived-email.routes';
import { createStorageRouter } from './routes/storage.routes';
import { createSearchRouter } from './routes/search.routes';
import { createDashboardRouter } from './routes/dashboard.routes';
import { createUploadRouter } from './routes/upload.routes';
import { createUserRouter } from './routes/user.routes';
import { createSettingsRouter } from './routes/settings.routes';
import { apiKeyRoutes } from './routes/api-key.routes';
import { integrityRoutes } from './routes/integrity.routes';
import { createJobsRouter } from './routes/jobs.routes';
import { AuthService } from '../services/AuthService';
import { AuditService } from '../services/AuditService';
import { UserService } from '../services/UserService';
import { IamService } from '../services/IamService';
import { StorageService } from '../services/StorageService';
import { SearchService } from '../services/SearchService';
import { SettingsService } from '../services/SettingsService';
import i18next from 'i18next';
import FsBackend from 'i18next-fs-backend';
import i18nextMiddleware from 'i18next-http-middleware';
import path from 'path';
import { logger } from '../config/logger';
import { rateLimiter } from './middleware/rateLimiter';
import { config } from '../config';
import { OpenArchiverFeature } from '@open-archiver/types';
// Define the "plugin" interface
export interface ArchiverModule {
	initialize: (app: Express, authService: AuthService) => Promise<void>;
	name: OpenArchiverFeature;
}

export let authService: AuthService;

export async function createServer(modules: ArchiverModule[] = []): Promise<Express> {
	// Load environment variables
	dotenv.config();

	// --- Environment Variable Validation ---
	const { JWT_SECRET, JWT_EXPIRES_IN } = process.env;

	if (!JWT_SECRET || !JWT_EXPIRES_IN) {
		throw new Error(
			'Missing required environment variables for the backend: JWT_SECRET, JWT_EXPIRES_IN.'
		);
	}

	// --- Dependency Injection Setup ---
	const auditService = new AuditService();
	const userService = new UserService();
	authService = new AuthService(userService, auditService, JWT_SECRET, JWT_EXPIRES_IN);
	const authController = new AuthController(authService, userService);
	const ingestionController = new IngestionController();
	const archivedEmailController = new ArchivedEmailController();
	const storageService = new StorageService();
	const storageController = new StorageController(storageService);
	const searchService = new SearchService();
	const searchController = new SearchController();
	const iamService = new IamService();
	const iamController = new IamController(iamService);
	const settingsService = new SettingsService();

	// --- i18next Initialization ---
	const initializeI18next = async () => {
		const systemSettings = await settingsService.getSystemSettings();
		const defaultLanguage = systemSettings?.language || 'en';
		logger.info({ language: defaultLanguage }, 'Default language');
		await i18next.use(FsBackend).init({
			lng: defaultLanguage,
			fallbackLng: defaultLanguage,
			ns: ['translation'],
			defaultNS: 'translation',
			backend: {
				loadPath: path.resolve(__dirname, '../locales/{{lng}}/{{ns}}.json'),
			},
		});
	};

	// Initialize i18next
	await initializeI18next();
	logger.info({}, 'i18next initialized');

	// Configure the Meilisearch index on startup
	logger.info({}, 'Configuring email index...');
	await searchService.configureEmailIndex();

	const app = express();

	// --- CORS ---
	app.use(
		cors({
			origin: process.env.APP_URL || 'http://localhost:3000',
			credentials: true,
		})
	);

	// Trust the proxy to get the real IP address of the client.
	// This is important for audit logging and security.
	app.set('trust proxy', true);

	// --- Routes ---
	const authRouter = createAuthRouter(authController);
	const ingestionRouter = createIngestionRouter(ingestionController, authService);
	const archivedEmailRouter = createArchivedEmailRouter(archivedEmailController, authService);
	const storageRouter = createStorageRouter(storageController, authService);
	const searchRouter = createSearchRouter(searchController, authService);
	const dashboardRouter = createDashboardRouter(authService);
	const iamRouter = createIamRouter(iamController, authService);
	const uploadRouter = createUploadRouter(authService);
	const userRouter = createUserRouter(authService);
	const settingsRouter = createSettingsRouter(authService);
	const apiKeyRouter = apiKeyRoutes(authService);
	const integrityRouter = integrityRoutes(authService);
	const jobsRouter = createJobsRouter(authService);

	// Middleware for all other routes
	app.use((req, res, next) => {
		// exclude certain API endpoints from the rate limiter, for example status, system settings
		const excludedPatterns = [/^\/v\d+\/auth\/status$/, /^\/v\d+\/settings\/system$/];
		for (const pattern of excludedPatterns) {
			if (pattern.test(req.path)) {
				return next();
			}
		}
		rateLimiter(req, res, next);
	});
	app.use(express.json());
	app.use(express.urlencoded({ extended: true }));

	// i18n middleware
	app.use(i18nextMiddleware.handle(i18next));

	app.use(`/${config.api.version}/auth`, authRouter);
	app.use(`/${config.api.version}/iam`, iamRouter);
	app.use(`/${config.api.version}/upload`, uploadRouter);
	app.use(`/${config.api.version}/ingestion-sources`, ingestionRouter);
	app.use(`/${config.api.version}/archived-emails`, archivedEmailRouter);
	app.use(`/${config.api.version}/storage`, storageRouter);
	app.use(`/${config.api.version}/search`, searchRouter);
	app.use(`/${config.api.version}/dashboard`, dashboardRouter);
	app.use(`/${config.api.version}/users`, userRouter);
	app.use(`/${config.api.version}/settings`, settingsRouter);
	app.use(`/${config.api.version}/api-keys`, apiKeyRouter);
	app.use(`/${config.api.version}/integrity`, integrityRouter);
	app.use(`/${config.api.version}/jobs`, jobsRouter);

	// Load all provided extension modules
	for (const module of modules) {
		await module.initialize(app, authService);
		console.log(`🏢 Enterprise module loaded: ${module.name}`);
	}
	app.get('/', (req, res) => {
		res.send('Backend is running!!');
	});

	console.log('✅ Core OSS modules loaded.');

	return app;
}
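createServer resolves to a configured Express app, so a bootstrap file (not part of this diff) would presumably use it along these lines:

```ts
import { createServer } from './api/server';
import { config } from './config';

// Hypothetical entry point; the real bootstrap is not shown in this changeset.
createServer().then((app) => {
	app.listen(config.app.port, () => {
		console.log(`Backend listening on port ${config.app.port}`);
	});
});
```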
13
packages/backend/src/config/api.ts
Normal file
13
packages/backend/src/config/api.ts
Normal file
@@ -0,0 +1,13 @@
import 'dotenv/config';

export const apiConfig = {
	rateLimit: {
		windowMs: process.env.RATE_LIMIT_WINDOW_MS
			? parseInt(process.env.RATE_LIMIT_WINDOW_MS, 10)
			: 1 * 60 * 1000, // 1 minute
		max: process.env.RATE_LIMIT_MAX_REQUESTS
			? parseInt(process.env.RATE_LIMIT_MAX_REQUESTS, 10)
			: 100, // limit each IP to 100 requests per windowMs
	},
	version: 'v1',
};
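The rateLimiter used in the server setup is not shown in this diff; a minimal sketch of how apiConfig could back it, assuming the express-rate-limit package:

import rateLimit from 'express-rate-limit';
import { apiConfig } from './config/api';

// Window and cap come from RATE_LIMIT_WINDOW_MS / RATE_LIMIT_MAX_REQUESTS.
export const rateLimiter = rateLimit({
	windowMs: apiConfig.rateLimit.windowMs,
	max: apiConfig.rateLimit.max,
	standardHeaders: true, // report limits via RateLimit-* response headers
	legacyHeaders: false,
});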
@@ -4,6 +4,8 @@ export const app = {
	nodeEnv: process.env.NODE_ENV || 'development',
	port: process.env.PORT_BACKEND ? parseInt(process.env.PORT_BACKEND, 10) : 4000,
	encryptionKey: process.env.ENCRYPTION_KEY,
	isDemo: process.env.IS_DEMO === 'true',
	syncFrequency: process.env.SYNC_FREQUENCY || '* * * * *', // defaults to every minute
	enableDeletion: process.env.ENABLE_DELETION === 'true',
	allInclusiveArchive: process.env.ALL_INCLUSIVE_ARCHIVE === 'true',
};
@@ -1,11 +1,14 @@
import { storage } from './storage';
import { app } from './app';
import { searchConfig } from './search';
import { searchConfig, meiliConfig } from './search';
import { connection as redisConfig } from './redis';
import { apiConfig } from './api';

export const config = {
	storage,
	app,
	search: searchConfig,
	meili: meiliConfig,
	redis: redisConfig,
	api: apiConfig,
};
@@ -2,6 +2,7 @@ import pino from 'pino';

export const logger = pino({
	level: process.env.LOG_LEVEL || 'info',
	redact: ['password'],
	transport: {
		target: 'pino-pretty',
		options: {
@@ -1,15 +1,20 @@
import 'dotenv/config';
import { type ConnectionOptions } from 'bullmq';

/**
 * @see https://github.com/taskforcesh/bullmq/blob/master/docs/gitbook/guide/connections.md
 */
const connectionOptions: any = {
const connectionOptions: ConnectionOptions = {
	host: process.env.REDIS_HOST || 'localhost',
	port: (process.env.REDIS_PORT && parseInt(process.env.REDIS_PORT, 10)) || 6379,
	password: process.env.REDIS_PASSWORD,
	enableReadyCheck: true,
};

if (process.env.REDIS_USER) {
	connectionOptions.username = process.env.REDIS_USER;
}

if (process.env.REDIS_TLS_ENABLED === 'true') {
	connectionOptions.tls = {
		rejectUnauthorized: false,
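These are standard BullMQ connection options; for context, a minimal sketch of how the queues elsewhere in this diff consume them (the queue name here is illustrative):

import { Queue } from 'bullmq';
import { connection } from './config/redis';

// Jobs added here are persisted in Redis/Valkey through the connection above.
const indexingQueue = new Queue('indexing', { connection });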
@@ -4,3 +4,9 @@ export const searchConfig = {
	host: process.env.MEILI_HOST || 'http://127.0.0.1:7700',
	apiKey: process.env.MEILI_MASTER_KEY || '',
};

export const meiliConfig = {
	indexingBatchSize: process.env.MEILI_INDEXING_BATCH
		? parseInt(process.env.MEILI_INDEXING_BATCH)
		: 500,
};
@@ -2,9 +2,14 @@ import { StorageConfig } from '@open-archiver/types';
import 'dotenv/config';

const storageType = process.env.STORAGE_TYPE;
const encryptionKey = process.env.STORAGE_ENCRYPTION_KEY;
const openArchiverFolderName = 'open-archiver';
let storageConfig: StorageConfig;

if (encryptionKey && !/^[a-fA-F0-9]{64}$/.test(encryptionKey)) {
	throw new Error('STORAGE_ENCRYPTION_KEY must be a 64-character hex string (32 bytes)');
}

if (storageType === 'local') {
	if (!process.env.STORAGE_LOCAL_ROOT_PATH) {
		throw new Error('STORAGE_LOCAL_ROOT_PATH is not defined in the environment variables');
@@ -13,6 +18,7 @@ if (storageType === 'local') {
		type: 'local',
		rootPath: process.env.STORAGE_LOCAL_ROOT_PATH,
		openArchiverFolderName: openArchiverFolderName,
		encryptionKey: encryptionKey,
	};
} else if (storageType === 's3') {
	if (
@@ -32,6 +38,7 @@ if (storageType === 'local') {
		region: process.env.STORAGE_S3_REGION,
		forcePathStyle: process.env.STORAGE_S3_FORCE_PATH_STYLE === 'true',
		openArchiverFolderName: openArchiverFolderName,
		encryptionKey: encryptionKey,
	};
} else {
	throw new Error(`Invalid STORAGE_TYPE: ${storageType}`);
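The 64-character hex requirement corresponds to 32 random bytes. A valid STORAGE_ENCRYPTION_KEY can be generated with Node's crypto module (a one-off sketch, not part of the codebase):

import { randomBytes } from 'node:crypto';

// Prints a 64-character hex string (32 bytes), matching the regex check above.
console.log(randomBytes(32).toString('hex'));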
@@ -1,4 +1,4 @@
import { drizzle } from 'drizzle-orm/postgres-js';
import { drizzle, PostgresJsDatabase } from 'drizzle-orm/postgres-js';
import postgres from 'postgres';
import 'dotenv/config';

@@ -12,3 +12,4 @@ if (!process.env.DATABASE_URL) {
const connectionString = encodeDatabaseUrl(process.env.DATABASE_URL);
const client = postgres(connectionString);
export const db = drizzle(client, { schema });
export type Database = PostgresJsDatabase<typeof schema>;
@@ -0,0 +1,4 @@
CREATE TABLE "system_settings" (
	"id" serial PRIMARY KEY NOT NULL,
	"config" jsonb NOT NULL
);
@@ -0,0 +1,11 @@
CREATE TABLE "api_keys" (
	"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
	"name" text NOT NULL,
	"user_id" uuid NOT NULL,
	"key" text NOT NULL,
	"expires_at" timestamp with time zone NOT NULL,
	"created_at" timestamp DEFAULT now() NOT NULL,
	"updated_at" timestamp DEFAULT now() NOT NULL
);
--> statement-breakpoint
ALTER TABLE "api_keys" ADD CONSTRAINT "api_keys_user_id_users_id_fk" FOREIGN KEY ("user_id") REFERENCES "public"."users"("id") ON DELETE cascade ON UPDATE no action;
@@ -0,0 +1 @@
ALTER TABLE "api_keys" ADD COLUMN "key_hash" text NOT NULL;
@@ -0,0 +1 @@
ALTER TYPE "public"."ingestion_provider" ADD VALUE 'mbox_import';
@@ -0,0 +1,9 @@
CREATE TYPE "public"."audit_log_action" AS ENUM('CREATE', 'READ', 'UPDATE', 'DELETE', 'LOGIN', 'LOGOUT', 'SETUP', 'IMPORT', 'PAUSE', 'SYNC', 'UPLOAD', 'SEARCH', 'DOWNLOAD', 'GENERATE');--> statement-breakpoint
CREATE TYPE "public"."audit_log_target_type" AS ENUM('ApiKey', 'ArchivedEmail', 'Dashboard', 'IngestionSource', 'Role', 'SystemSettings', 'User', 'File');--> statement-breakpoint
ALTER TABLE "audit_logs" ALTER COLUMN "target_type" SET DATA TYPE "public"."audit_log_target_type" USING "target_type"::"public"."audit_log_target_type";--> statement-breakpoint
ALTER TABLE "audit_logs" ADD COLUMN "previous_hash" varchar(64);--> statement-breakpoint
ALTER TABLE "audit_logs" ADD COLUMN "actor_ip" text;--> statement-breakpoint
ALTER TABLE "audit_logs" ADD COLUMN "action_type" "audit_log_action" NOT NULL;--> statement-breakpoint
ALTER TABLE "audit_logs" ADD COLUMN "current_hash" varchar(64) NOT NULL;--> statement-breakpoint
ALTER TABLE "audit_logs" DROP COLUMN "action";--> statement-breakpoint
ALTER TABLE "audit_logs" DROP COLUMN "is_tamper_evident";
@@ -0,0 +1,4 @@
ALTER TABLE "attachments" DROP CONSTRAINT "attachments_content_hash_sha256_unique";--> statement-breakpoint
ALTER TABLE "attachments" ADD COLUMN "ingestion_source_id" uuid;--> statement-breakpoint
ALTER TABLE "attachments" ADD CONSTRAINT "attachments_ingestion_source_id_ingestion_sources_id_fk" FOREIGN KEY ("ingestion_source_id") REFERENCES "public"."ingestion_sources"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
CREATE UNIQUE INDEX "source_hash_unique" ON "attachments" USING btree ("ingestion_source_id","content_hash_sha256");
@@ -0,0 +1,2 @@
DROP INDEX "source_hash_unique";--> statement-breakpoint
CREATE INDEX "source_hash_idx" ON "attachments" USING btree ("ingestion_source_id","content_hash_sha256");
1103
packages/backend/src/database/migrations/meta/0017_snapshot.json
Normal file
File diff suppressed because it is too large
1171
packages/backend/src/database/migrations/meta/0018_snapshot.json
Normal file
File diff suppressed because it is too large
1177
packages/backend/src/database/migrations/meta/0019_snapshot.json
Normal file
File diff suppressed because it is too large
1178
packages/backend/src/database/migrations/meta/0020_snapshot.json
Normal file
File diff suppressed because it is too large
1225
packages/backend/src/database/migrations/meta/0021_snapshot.json
Normal file
File diff suppressed because it is too large
1257
packages/backend/src/database/migrations/meta/0022_snapshot.json
Normal file
File diff suppressed because it is too large
1257
packages/backend/src/database/migrations/meta/0023_snapshot.json
Normal file
File diff suppressed because it is too large
@@ -120,6 +120,55 @@
			"when": 1755780572342,
			"tag": "0016_lonely_mariko_yashida",
			"breakpoints": true
		},
		{
			"idx": 17,
			"version": "7",
			"when": 1755961566627,
			"tag": "0017_tranquil_shooting_star",
			"breakpoints": true
		},
		{
			"idx": 18,
			"version": "7",
			"when": 1756911118035,
			"tag": "0018_flawless_owl",
			"breakpoints": true
		},
		{
			"idx": 19,
			"version": "7",
			"when": 1756937533843,
			"tag": "0019_confused_scream",
			"breakpoints": true
		},
		{
			"idx": 20,
			"version": "7",
			"when": 1757860242528,
			"tag": "0020_panoramic_wolverine",
			"breakpoints": true
		},
		{
			"idx": 21,
			"version": "7",
			"when": 1759412986134,
			"tag": "0021_nosy_veda",
			"breakpoints": true
		},
		{
			"idx": 22,
			"version": "7",
			"when": 1759701622932,
			"tag": "0022_complete_triton",
			"breakpoints": true
		},
		{
			"idx": 23,
			"version": "7",
			"when": 1760354094610,
			"tag": "0023_swift_swordsman",
			"breakpoints": true
		}
	]
}
@@ -5,3 +5,7 @@ export * from './schema/compliance';
export * from './schema/custodians';
export * from './schema/ingestion-sources';
export * from './schema/users';
export * from './schema/system-settings';
export * from './schema/api-keys';
export * from './schema/audit-logs';
export * from './schema/enums';
15
packages/backend/src/database/schema/api-keys.ts
Normal file
@@ -0,0 +1,15 @@
import { pgTable, text, timestamp, uuid } from 'drizzle-orm/pg-core';
import { users } from './users';

export const apiKeys = pgTable('api_keys', {
	id: uuid('id').primaryKey().defaultRandom(),
	name: text('name').notNull(),
	userId: uuid('user_id')
		.notNull()
		.references(() => users.id, { onDelete: 'cascade' }),
	key: text('key').notNull(), // Encrypted API key
	keyHash: text('key_hash').notNull(),
	expiresAt: timestamp('expires_at', { withTimezone: true, mode: 'date' }).notNull(),
	createdAt: timestamp('created_at').defaultNow().notNull(),
	updatedAt: timestamp('updated_at').defaultNow().notNull(),
});
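The table stores both the encrypted key and a separate key_hash. A deterministic digest lets lookups run without decrypting anything; a minimal sketch of such a helper (the exact hashing scheme is an assumption, not confirmed by this diff):

import { createHash } from 'node:crypto';

// Deterministic SHA-256 digest of the raw key; safe to store and query by.
export function hashApiKey(rawKey: string): string {
	return createHash('sha256').update(rawKey).digest('hex');
}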
@@ -1,15 +1,23 @@
import { relations } from 'drizzle-orm';
import { pgTable, text, uuid, bigint, primaryKey } from 'drizzle-orm/pg-core';
import { pgTable, text, uuid, bigint, primaryKey, index } from 'drizzle-orm/pg-core';
import { archivedEmails } from './archived-emails';
import { ingestionSources } from './ingestion-sources';

export const attachments = pgTable('attachments', {
	id: uuid('id').primaryKey().defaultRandom(),
	filename: text('filename').notNull(),
	mimeType: text('mime_type'),
	sizeBytes: bigint('size_bytes', { mode: 'number' }).notNull(),
	contentHashSha256: text('content_hash_sha256').notNull().unique(),
	storagePath: text('storage_path').notNull(),
});
export const attachments = pgTable(
	'attachments',
	{
		id: uuid('id').primaryKey().defaultRandom(),
		filename: text('filename').notNull(),
		mimeType: text('mime_type'),
		sizeBytes: bigint('size_bytes', { mode: 'number' }).notNull(),
		contentHashSha256: text('content_hash_sha256').notNull(),
		storagePath: text('storage_path').notNull(),
		ingestionSourceId: uuid('ingestion_source_id').references(() => ingestionSources.id, {
			onDelete: 'cascade',
		}),
	},
	(table) => [index('source_hash_idx').on(table.ingestionSourceId, table.contentHashSha256)]
);

export const emailAttachments = pgTable(
	'email_attachments',
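With the global unique constraint dropped, attachment deduplication is scoped to an ingestion source via the composite source_hash_idx index. A duplicate lookup against it might look roughly like this (a sketch; the project's actual helper is not shown here):

import { and, eq } from 'drizzle-orm';
import { db } from '../database';
import { attachments } from '../database/schema';

// Returns the existing attachment row for this (source, hash) pair, if any.
export async function findDuplicateAttachment(sourceId: string, sha256: string) {
	return db
		.select()
		.from(attachments)
		.where(
			and(
				eq(attachments.ingestionSourceId, sourceId),
				eq(attachments.contentHashSha256, sha256)
			)
		)
		.limit(1);
}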
@@ -1,12 +1,34 @@
import { bigserial, boolean, jsonb, pgTable, text, timestamp } from 'drizzle-orm/pg-core';
import { bigserial, jsonb, pgTable, text, timestamp, varchar } from 'drizzle-orm/pg-core';
import { auditLogActionEnum, auditLogTargetTypeEnum } from './enums';

export const auditLogs = pgTable('audit_logs', {
	// A unique, sequential, and gapless primary key for ordering.
	id: bigserial('id', { mode: 'number' }).primaryKey(),

	// The SHA-256 hash of the preceding log entry's `currentHash`.
	previousHash: varchar('previous_hash', { length: 64 }),

	// A high-precision, UTC timestamp of when the event occurred.
	timestamp: timestamp('timestamp', { withTimezone: true }).notNull().defaultNow(),

	// A stable identifier for the actor who performed the action.
	actorIdentifier: text('actor_identifier').notNull(),
	action: text('action').notNull(),
	targetType: text('target_type'),

	// The IP address from which the action was initiated.
	actorIp: text('actor_ip'),

	// A standardized, machine-readable identifier for the event.
	actionType: auditLogActionEnum('action_type').notNull(),

	// The type of resource that was affected by the action.
	targetType: auditLogTargetTypeEnum('target_type'),

	// The unique identifier of the affected resource.
	targetId: text('target_id'),

	// A JSON object containing specific, contextual details of the event.
	details: jsonb('details'),
	isTamperEvident: boolean('is_tamper_evident').default(false),

	// The SHA-256 hash of this entire log entry's contents.
	currentHash: varchar('current_hash', { length: 64 }).notNull(),
});
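previous_hash and current_hash make the table a tamper-evident chain: each row commits to its own contents plus the hash of the row before it, so editing any historical row breaks every hash after it. A minimal sketch of the chaining idea (field selection and serialization are assumptions):

import { createHash } from 'node:crypto';

interface AuditEntryInput {
	previousHash: string | null;
	timestamp: string;
	actorIdentifier: string;
	actionType: string;
	targetType?: string | null;
	targetId?: string | null;
	details?: unknown;
}

// SHA-256 over the serialized entry; previousHash is part of the payload,
// which is what links each row to its predecessor.
export function computeAuditHash(entry: AuditEntryInput): string {
	return createHash('sha256').update(JSON.stringify(entry)).digest('hex');
}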
5
packages/backend/src/database/schema/enums.ts
Normal file
@@ -0,0 +1,5 @@
import { pgEnum } from 'drizzle-orm/pg-core';
import { AuditLogActions, AuditLogTargetTypes } from '@open-archiver/types';

export const auditLogActionEnum = pgEnum('audit_log_action', AuditLogActions);
export const auditLogTargetTypeEnum = pgEnum('audit_log_target_type', AuditLogTargetTypes);
@@ -8,6 +8,7 @@ export const ingestionProviderEnum = pgEnum('ingestion_provider', [
	'generic_imap',
	'pst_import',
	'eml_import',
	'mbox_import',
]);

export const ingestionStatusEnum = pgEnum('ingestion_status', [
7
packages/backend/src/database/schema/system-settings.ts
Normal file
@@ -0,0 +1,7 @@
import { pgTable, serial, jsonb } from 'drizzle-orm/pg-core';
import type { SystemSettings } from '@open-archiver/types';

export const systemSettings = pgTable('system_settings', {
	id: serial('id').primaryKey(),
	config: jsonb('config').$type<SystemSettings>().notNull(),
});
9
packages/backend/src/helpers/deletionGuard.ts
Normal file
@@ -0,0 +1,9 @@
import { config } from '../config';
import i18next from 'i18next';

export function checkDeletionEnabled() {
	if (!config.app.enableDeletion) {
		const errorMessage = i18next.t('Deletion is disabled for this instance.');
		throw new Error(errorMessage);
	}
}
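Callers invoke the guard before any destructive operation so a disabled instance fails fast; an illustrative (hypothetical) call site:

import { checkDeletionEnabled } from '../helpers/deletionGuard';

// Throws before any work happens unless ENABLE_DELETION is 'true'.
async function deleteArchivedEmail(id: string): Promise<void> {
	checkDeletionEnabled();
	// ...perform the actual deletion here
}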
@@ -1,7 +1,10 @@
import PDFParser from 'pdf2json';
import mammoth from 'mammoth';
import xlsx from 'xlsx';
import { logger } from '../config/logger';
import { OcrService } from '../services/OcrService';

// Legacy PDF extraction (with improved memory management)
function extractTextFromPdf(buffer: Buffer): Promise<string> {
	return new Promise((resolve) => {
		const pdfParser = new PDFParser(null, true);
@@ -10,28 +13,57 @@ function extractTextFromPdf(buffer: Buffer): Promise<string> {
		const finish = (text: string) => {
			if (completed) return;
			completed = true;
			pdfParser.removeAllListeners();

			// explicit cleanup
			try {
				pdfParser.removeAllListeners();
			} catch (e) {
				// Ignore cleanup errors
			}

			resolve(text);
		};

		pdfParser.on('pdfParser_dataError', () => finish(''));
		pdfParser.on('pdfParser_dataReady', () => finish(pdfParser.getRawTextContent()));
		pdfParser.on('pdfParser_dataError', (err: any) => {
			logger.warn('PDF parsing error:', err?.parserError || 'Unknown error');
			finish('');
		});

		pdfParser.on('pdfParser_dataReady', () => {
			try {
				const text = pdfParser.getRawTextContent();
				finish(text || '');
			} catch (err) {
				logger.warn('Error getting PDF text content:', err);
				finish('');
			}
		});

		try {
			pdfParser.parseBuffer(buffer);
		} catch (err) {
			console.error('Error parsing PDF buffer', err);
			logger.error('Error parsing PDF buffer:', err);
			finish('');
		}

		// Prevent hanging if the parser never emits events
		setTimeout(() => finish(''), 10000);
		// reduced Timeout for better performance
		// setTimeout(() => {
		// 	logger.warn('PDF parsing timed out');
		// 	finish('');
		// }, 5000);
	});
}

export async function extractText(buffer: Buffer, mimeType: string): Promise<string> {
// Legacy text extraction for various formats
async function extractTextLegacy(buffer: Buffer, mimeType: string): Promise<string> {
	try {
		if (mimeType === 'application/pdf') {
			// Check PDF size (memory protection)
			if (buffer.length > 50 * 1024 * 1024) {
				// 50MB Limit
				logger.warn('PDF too large for legacy extraction, skipping');
				return '';
			}
			return await extractTextFromPdf(buffer);
		}
@@ -50,7 +82,7 @@ export async function extractText(buffer: Buffer, mimeType: string): Promise<string> {
			const sheetText = xlsx.utils.sheet_to_txt(sheet);
			fullText += sheetText + '\n';
		}
		return fullText;
		return fullText.trim();
	}

	if (
@@ -60,11 +92,56 @@ export async function extractText(buffer: Buffer, mimeType: string): Promise<string> {
	) {
		return buffer.toString('utf-8');
	}

		return '';
	} catch (error) {
		console.error(`Error extracting text from attachment with MIME type ${mimeType}:`, error);
		return ''; // Return empty string on failure
		logger.error(`Error extracting text from attachment with MIME type ${mimeType}:`, error);

		// Force garbage collection if available
		if (global.gc) {
			global.gc();
		}

		return '';
	}
}

// Main extraction function
export async function extractText(buffer: Buffer, mimeType: string): Promise<string> {
	// Input validation
	if (!buffer || buffer.length === 0) {
		return '';
	}

	console.warn(`Unsupported MIME type for text extraction: ${mimeType}`);
	return ''; // Return empty string for unsupported types
	if (!mimeType) {
		logger.warn('No MIME type provided for text extraction');
		return '';
	}

	// General size limit
	const maxSize = process.env.TIKA_URL ? 100 * 1024 * 1024 : 50 * 1024 * 1024; // 100MB for Tika, 50MB for Legacy
	if (buffer.length > maxSize) {
		logger.warn(
			`File too large for text extraction: ${buffer.length} bytes (limit: ${maxSize})`
		);
		return '';
	}

	// Decide between Tika and legacy
	const tikaUrl = process.env.TIKA_URL;

	if (tikaUrl) {
		// Tika decides what it can parse
		logger.debug(`Using Tika for text extraction: ${mimeType}`);
		const ocrService = new OcrService();
		try {
			return await ocrService.extractTextWithTika(buffer, mimeType);
		} catch (error) {
			logger.error({ error }, 'OCR text extraction failed, returning empty string');
			return '';
		}
	} else {
		// extract using legacy mode
		return await extractTextLegacy(buffer, mimeType);
	}
}
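When TIKA_URL is set, Tika handles format detection (including OCR through the OcrService); otherwise the legacy per-format path runs with its 50MB guard. A quick illustrative call (file path and import path are assumptions):

import { readFile } from 'node:fs/promises';
import { extractText } from './helpers/textExtractor';

const buffer = await readFile('./sample.pdf');
const text = await extractText(buffer, 'application/pdf');
console.log(text.slice(0, 200));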
@@ -1,110 +1,10 @@
import express from 'express';
import dotenv from 'dotenv';
import { AuthController } from './api/controllers/auth.controller';
import { IngestionController } from './api/controllers/ingestion.controller';
import { ArchivedEmailController } from './api/controllers/archived-email.controller';
import { StorageController } from './api/controllers/storage.controller';
import { SearchController } from './api/controllers/search.controller';
import { IamController } from './api/controllers/iam.controller';
import { requireAuth } from './api/middleware/requireAuth';
import { createAuthRouter } from './api/routes/auth.routes';
import { createIamRouter } from './api/routes/iam.routes';
import { createIngestionRouter } from './api/routes/ingestion.routes';
import { createArchivedEmailRouter } from './api/routes/archived-email.routes';
import { createStorageRouter } from './api/routes/storage.routes';
import { createSearchRouter } from './api/routes/search.routes';
import { createDashboardRouter } from './api/routes/dashboard.routes';
import { createUploadRouter } from './api/routes/upload.routes';
import { createUserRouter } from './api/routes/user.routes';
import testRouter from './api/routes/test.routes';
import { AuthService } from './services/AuthService';
import { UserService } from './services/UserService';
import { IamService } from './services/IamService';
import { StorageService } from './services/StorageService';
import { SearchService } from './services/SearchService';

// Load environment variables
dotenv.config();

// --- Environment Variable Validation ---
const { PORT_BACKEND, JWT_SECRET, JWT_EXPIRES_IN } = process.env;

if (!PORT_BACKEND || !JWT_SECRET || !JWT_EXPIRES_IN) {
	throw new Error(
		'Missing required environment variables for the backend: PORT_BACKEND, JWT_SECRET, JWT_EXPIRES_IN.'
	);
}

// --- Dependency Injection Setup ---

const userService = new UserService();
const authService = new AuthService(userService, JWT_SECRET, JWT_EXPIRES_IN);
const authController = new AuthController(authService, userService);
const ingestionController = new IngestionController();
const archivedEmailController = new ArchivedEmailController();
const storageService = new StorageService();
const storageController = new StorageController(storageService);
const searchService = new SearchService();
const searchController = new SearchController();
const iamService = new IamService();
const iamController = new IamController(iamService);

// --- Express App Initialization ---
const app = express();

// --- Routes ---
const authRouter = createAuthRouter(authController);
const ingestionRouter = createIngestionRouter(ingestionController, authService);
const archivedEmailRouter = createArchivedEmailRouter(archivedEmailController, authService);
const storageRouter = createStorageRouter(storageController, authService);
const searchRouter = createSearchRouter(searchController, authService);
const dashboardRouter = createDashboardRouter(authService);
const iamRouter = createIamRouter(iamController, authService);
const uploadRouter = createUploadRouter(authService);
const userRouter = createUserRouter(authService);
// upload route is added before middleware because it doesn't use the json middleware.
app.use('/v1/upload', uploadRouter);

// Middleware for all other routes
app.use(express.json());
app.use(express.urlencoded({ extended: true }));

app.use('/v1/auth', authRouter);
app.use('/v1/iam', iamRouter);
app.use('/v1/ingestion-sources', ingestionRouter);
app.use('/v1/archived-emails', archivedEmailRouter);
app.use('/v1/storage', storageRouter);
app.use('/v1/search', searchRouter);
app.use('/v1/dashboard', dashboardRouter);
app.use('/v1/users', userRouter);
app.use('/v1/test', testRouter);

// Example of a protected route
app.get('/v1/protected', requireAuth(authService), (req, res) => {
	res.json({
		message: 'You have accessed a protected route!',
		user: req.user, // The user payload is attached by the requireAuth middleware
	});
});

app.get('/', (req, res) => {
	res.send('Backend is running!');
});

// --- Server Start ---
const startServer = async () => {
	try {
		// Configure the Meilisearch index on startup
		console.log('Configuring email index...');
		await searchService.configureEmailIndex();

		app.listen(PORT_BACKEND, () => {
			console.log(`Backend listening at http://localhost:${PORT_BACKEND}`);
		});
	} catch (error) {
		console.error('Failed to start the server:', error);
		process.exit(1);
	}
};

startServer();
export { createServer, ArchiverModule } from './api/server';
export { logger } from './config/logger';
export { config } from './config';
export * from './services/AuthService';
export * from './services/AuditService';
export * from './api/middleware/requireAuth';
export * from './api/middleware/requirePermission';
export { db } from './database';
export * as drizzleOrm from 'drizzle-orm';
export * from './database/schema';
@@ -3,14 +3,15 @@ import { IndexingService } from '../../services/IndexingService';
import { SearchService } from '../../services/SearchService';
import { StorageService } from '../../services/StorageService';
import { DatabaseService } from '../../services/DatabaseService';
import { PendingEmail } from '@open-archiver/types';

const searchService = new SearchService();
const storageService = new StorageService();
const databaseService = new DatabaseService();
const indexingService = new IndexingService(databaseService, searchService, storageService);

export default async function (job: Job<{ emailId: string }>) {
	const { emailId } = job.data;
	console.log(`Indexing email with ID: ${emailId}`);
	await indexingService.indexEmailById(emailId);
export default async function (job: Job<{ emails: PendingEmail[] }>) {
	const { emails } = job.data;
	console.log(`Indexing email batch with ${emails.length} emails`);
	await indexingService.indexEmailBatch(emails);
}
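The default export is what a BullMQ worker executes per job; the wiring looks roughly like this (queue and file names are assumed from the surrounding code, not shown in this diff):

import { Worker } from 'bullmq';
import { connection } from '../../config/redis';
import indexEmailBatchProcessor from './indexEmailBatch.processor';

// One worker per process; each job now carries a whole batch of PendingEmail objects.
const worker = new Worker('indexing', indexEmailBatchProcessor, { connection });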
@@ -1,9 +1,19 @@
import { Job } from 'bullmq';
import { IProcessMailboxJob, SyncState, ProcessMailboxError } from '@open-archiver/types';
import {
	IProcessMailboxJob,
	SyncState,
	ProcessMailboxError,
	PendingEmail,
} from '@open-archiver/types';
import { IngestionService } from '../../services/IngestionService';
import { logger } from '../../config/logger';
import { EmailProviderFactory } from '../../services/EmailProviderFactory';
import { StorageService } from '../../services/StorageService';
import { IndexingService } from '../../services/IndexingService';
import { SearchService } from '../../services/SearchService';
import { DatabaseService } from '../../services/DatabaseService';
import { config } from '../../config';
import { indexingQueue } from '../queues';

/**
 * This processor handles the ingestion of emails for a single user's mailbox.
@@ -15,9 +25,15 @@ import { StorageService } from '../../services/StorageService';
 */
export const processMailboxProcessor = async (job: Job<IProcessMailboxJob, SyncState, string>) => {
	const { ingestionSourceId, userEmail } = job.data;
	const BATCH_SIZE: number = config.meili.indexingBatchSize;
	let emailBatch: PendingEmail[] = [];

	logger.info({ ingestionSourceId, userEmail }, `Processing mailbox for user`);

	const searchService = new SearchService();
	const storageService = new StorageService();
	const databaseService = new DatabaseService();

	try {
		const source = await IngestionService.findById(ingestionSourceId);
		if (!source) {
@@ -26,22 +42,48 @@ export const processMailboxProcessor = async (job: Job<IProcessMailboxJob, SyncState, string>) => {
		const connector = EmailProviderFactory.createConnector(source);
		const ingestionService = new IngestionService();
		const storageService = new StorageService();

		// Pass the sync state for the entire source; the connector will handle per-user logic if necessary.
		for await (const email of connector.fetchEmails(userEmail, source.syncState)) {
		// Create a callback to check for duplicates without fetching full email content.
		const checkDuplicate = async (messageId: string) => {
			return await IngestionService.doesEmailExist(messageId, ingestionSourceId);
		};

		for await (const email of connector.fetchEmails(
			userEmail,
			source.syncState,
			checkDuplicate
		)) {
			if (email) {
				await ingestionService.processEmail(email, source, storageService, userEmail);
				const processedEmail = await ingestionService.processEmail(
					email,
					source,
					storageService,
					userEmail
				);
				if (processedEmail) {
					emailBatch.push(processedEmail);
					if (emailBatch.length >= BATCH_SIZE) {
						await indexingQueue.add('index-email-batch', { emails: emailBatch });
						emailBatch = [];
					}
				}
			}
		}

		if (emailBatch.length > 0) {
			await indexingQueue.add('index-email-batch', { emails: emailBatch });
			emailBatch = [];
		}

		const newSyncState = connector.getUpdatedSyncState(userEmail);

		logger.info({ ingestionSourceId, userEmail }, `Finished processing mailbox for user`);

		// Return the new sync state to be aggregated by the parent flow.
		return newSyncState;
	} catch (error) {
		if (emailBatch.length > 0) {
			await indexingQueue.add('index-email-batch', { emails: emailBatch });
			emailBatch = [];
		}

		logger.error({ err: error, ingestionSourceId, userEmail }, 'Error processing mailbox');
		const errorMessage = error instanceof Error ? error.message : 'An unknown error occurred';
		const processMailboxError: ProcessMailboxError = {
Some files were not shown because too many files have changed in this diff.