mirror of
https://github.com/LogicLabs-OU/OpenArchiver.git
synced 2026-04-06 00:31:57 +02:00
Compare commits
130 Commits
v0.1.1
...
security-u
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a56df62099 | ||
|
|
26a760b232 | ||
|
|
6be0774bc4 | ||
|
|
4a23f8f29f | ||
|
|
074256ed59 | ||
|
|
7d178d786b | ||
|
|
4b11cd931a | ||
|
|
0a21ad14cd | ||
|
|
63d3960f79 | ||
|
|
85a526d1b6 | ||
|
|
52a1a11973 | ||
|
|
4048f47777 | ||
|
|
22b173cbe4 | ||
|
|
774b0d7a6b | ||
|
|
85607d2ab3 | ||
|
|
94021eab69 | ||
|
|
faefdac44a | ||
|
|
392f51dabc | ||
|
|
baff1195c7 | ||
|
|
f1da17e484 | ||
|
|
a2c55f36ee | ||
|
|
9fdba4cd61 | ||
|
|
108c646596 | ||
|
|
61e44c81f7 | ||
|
|
f651aeab0e | ||
|
|
3fb4290934 | ||
|
|
8c33b63bdf | ||
|
|
2b325f3461 | ||
|
|
4d3c164bc0 | ||
|
|
7288286fd9 | ||
|
|
ec1cf3cf0b | ||
|
|
9c9152a2ee | ||
|
|
c05b3b92d9 | ||
|
|
aed0c964c8 | ||
|
|
86dda6c6d3 | ||
|
|
6e1dd17267 | ||
|
|
b4d2125020 | ||
|
|
a2ca79d3eb | ||
|
|
8f519dc995 | ||
|
|
b2ca3ef0e1 | ||
|
|
9873228d01 | ||
|
|
94190f8b7c | ||
|
|
832e29bd92 | ||
|
|
cba6dfcae1 | ||
|
|
24f5b341a8 | ||
|
|
cba7e05d98 | ||
|
|
cfdfe42fb8 | ||
|
|
9138c1c753 | ||
|
|
c4afa471cb | ||
|
|
187282c68d | ||
|
|
82a83a71e4 | ||
|
|
ff676ecb86 | ||
|
|
9ff6801afc | ||
|
|
d2b4337be9 | ||
|
|
b03791d9a6 | ||
|
|
4cbbb6cec3 | ||
|
|
f10bf93d1b | ||
|
|
512f0312ba | ||
|
|
29db34c5d8 | ||
|
|
a87000f9dc | ||
|
|
4872ed597f | ||
|
|
e02ad0355e | ||
|
|
23ebe942b2 | ||
|
|
842f8092d6 | ||
|
|
3201fbfe0b | ||
|
|
d5c9f9a14b | ||
|
|
f2a5b29105 | ||
|
|
c65d80e948 | ||
|
|
f484f72994 | ||
|
|
59ca07dd1a | ||
|
|
d74d5e5308 | ||
|
|
1ae7b2fd2f | ||
|
|
e0953e270e | ||
|
|
705b1e5311 | ||
|
|
5a2ca3bf19 | ||
|
|
f0678b3aa9 | ||
|
|
4156abcdfa | ||
|
|
d47f0c5b08 | ||
|
|
a18e34a486 | ||
|
|
be3127136f | ||
|
|
26aeaa7c2d | ||
|
|
07cc1e5075 | ||
|
|
d9971e3ff4 | ||
|
|
3ff50ec155 | ||
|
|
181f4fd46c | ||
|
|
ffaa9762af | ||
|
|
f45ed3a62a | ||
|
|
95445dcd37 | ||
|
|
0d64eff208 | ||
|
|
6fb459630e | ||
|
|
227e8d8d18 | ||
|
|
a5d3a3be86 | ||
|
|
8695f484ac | ||
|
|
ed15c0e9bd | ||
|
|
e09c82f1fe | ||
|
|
6a154a8f02 | ||
|
|
ac4dae08d2 | ||
|
|
c297e5a714 | ||
|
|
5cc24d0d67 | ||
|
|
488df16f26 | ||
|
|
e9d84fb438 | ||
|
|
32752ce90f | ||
|
|
42dc884588 | ||
|
|
563e2dcae4 | ||
|
|
b2f41062f8 | ||
|
|
4e0f6ce5df | ||
|
|
e68d9a338d | ||
|
|
a7e6b93c77 | ||
|
|
9d3e6fc22e | ||
|
|
16e6d04682 | ||
|
|
cb04da78a6 | ||
|
|
36dbd426d5 | ||
|
|
8985655a48 | ||
|
|
9b0c136fff | ||
|
|
88046c38e4 | ||
|
|
9c5922fd31 | ||
|
|
7240da7b40 | ||
|
|
898f52ac78 | ||
|
|
becd5f1490 | ||
|
|
1d907abdbd | ||
|
|
8a74838f43 | ||
|
|
6930162079 | ||
|
|
748240b16e | ||
|
|
88cb5340a7 | ||
|
|
e95093c439 | ||
|
|
a96b32e0e9 | ||
|
|
b081c802b7 | ||
|
|
7d60a8fe6e | ||
|
|
5217d24184 | ||
|
|
8c12cda370 |
55
.env.example
55
.env.example
@@ -1,26 +1,44 @@
|
||||
# Application
|
||||
|
||||
# --- Application Settings ---
|
||||
# Set to 'production' for production environments
|
||||
NODE_ENV=development
|
||||
PORT_BACKEND=4000
|
||||
PORT_FRONTEND=3000
|
||||
# The frequency of continuous email syncing. Default is every minutes, but you can change it to another value based on your needs.
|
||||
SYNC_FREQUENCY='* * * * *'
|
||||
|
||||
# --- Docker Compose Service Configuration ---
|
||||
# These variables are used by docker-compose.yml to configure the services. Leave them unchanged if you use Docker services for Postgresql, Valkey (Redis) and Meilisearch. If you decide to use your own instances of these services, you can substitute them with your own connection credentials.
|
||||
|
||||
# PostgreSQL
|
||||
DATABASE_URL="postgresql://admin:password@postgres:5432/open_archive?schema=public"
|
||||
|
||||
# Redis
|
||||
REDIS_HOST=redis
|
||||
REDIS_PORT=6379
|
||||
POSTGRES_DB=open_archive
|
||||
POSTGRES_USER=admin
|
||||
POSTGRES_PASSWORD=password
|
||||
DATABASE_URL="postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres:5432/${POSTGRES_DB}"
|
||||
|
||||
# Meilisearch
|
||||
MEILI_MASTER_KEY=aSampleMasterKey
|
||||
MEILI_HOST=http://meilisearch:7700
|
||||
|
||||
# Storage
|
||||
|
||||
|
||||
# Redis (We use Valkey, which is Redis-compatible and open source)
|
||||
REDIS_HOST=valkey
|
||||
REDIS_PORT=6379
|
||||
REDIS_PASSWORD=defaultredispassword
|
||||
# If you run Valkey service from Docker Compose, set the REDIS_TLS_ENABLED variable to false.
|
||||
REDIS_TLS_ENABLED=false
|
||||
|
||||
|
||||
# --- Storage Settings ---
|
||||
# Choose your storage backend. Valid options are 'local' or 's3'.
|
||||
STORAGE_TYPE=local
|
||||
# The maximum request body size to accept in bytes including while streaming. The body size can also be specified with a unit suffix for kilobytes (K), megabytes (M), or gigabytes (G). For example, 512K or 1M. Defaults to 512kb. Or the value of Infinity if you don't want any upload limit.
|
||||
BODY_SIZE_LIMIT=100M
|
||||
|
||||
# --- Local Storage Settings ---
|
||||
# The absolute path on the server where files will be stored.
|
||||
# The path inside the container where files will be stored.
|
||||
# This is mapped to a Docker volume for persistence.
|
||||
# This is only used if STORAGE_TYPE is 'local'.
|
||||
STORAGE_LOCAL_ROOT_PATH=/var/data/open-archiver
|
||||
|
||||
@@ -34,18 +52,21 @@ STORAGE_S3_REGION=
|
||||
# Set to 'true' for MinIO and other non-AWS S3 services
|
||||
STORAGE_S3_FORCE_PATH_STYLE=false
|
||||
|
||||
# --- Security & Authentication ---
|
||||
|
||||
# Rate Limiting
|
||||
# The window in milliseconds for which API requests are checked. Defaults to 60000 (1 minute).
|
||||
RATE_LIMIT_WINDOW_MS=60000
|
||||
# The maximum number of API requests allowed from an IP within the window. Defaults to 100.
|
||||
RATE_LIMIT_MAX_REQUESTS=100
|
||||
|
||||
# JWT
|
||||
JWT_SECRET="a-very-secret-key"
|
||||
# IMPORTANT: Change this to a long, random, and secret string in your .env file
|
||||
JWT_SECRET=a-very-secret-key-that-you-should-change
|
||||
JWT_EXPIRES_IN="7d"
|
||||
|
||||
|
||||
|
||||
# Admin users
|
||||
ADMIN_EMAIL=admin@local.com
|
||||
ADMIN_PASSWORD=a_strong_pass
|
||||
SUPER_API_KEY=
|
||||
|
||||
# Master Encryption Key for sensitive data
|
||||
# Master Encryption Key for sensitive data (Such as Ingestion source credentials and passwords)
|
||||
# IMPORTANT: Generate a secure, random 32-byte hex string for this
|
||||
# Use `openssl rand -hex 32` to generate a key
|
||||
# You can use `openssl rand -hex 32` to generate a key.
|
||||
ENCRYPTION_KEY=
|
||||
|
||||
27
.github/CLA-v2.md
vendored
Normal file
27
.github/CLA-v2.md
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
# Contributor License Agreement (CLA)
|
||||
|
||||
Version: 2
|
||||
|
||||
This Agreement is for your protection as a Contributor as well as the protection of the maintainers of the Open Archiver software; it does not change your rights to use your own Contributions for any other purpose. Open Archiver is developed and maintained by LogicLabs OÜ, a private limited company established under the laws of the Republic of Estonia.
|
||||
|
||||
You accept and agree to the following terms and conditions for Your present and future Contributions submitted to LogicLabs OÜ. Except for the license granted herein to LogicLabs OÜ and recipients of software distributed by LogicLabs OÜ, You reserve all right, title, and interest in and to Your Contributions.
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"You" (or "Your") shall mean the copyright owner or legal entity authorized by the copyright owner that is making this Agreement with LogicLabs OÜ. For legal entities, the entity making a Contribution and all other entities that control, are controlled by, or are under common control with that entity are considered to be a single Contributor.
|
||||
|
||||
"Contribution" shall mean any original work of authorship, including any modifications or additions to an existing work, that is intentionally submitted by You to LogicLabs OÜ for inclusion in, or documentation of, any of the products owned or managed by LogicLabs OÜ (the "Work"). For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to LogicLabs OÜ or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, LogicLabs OÜ for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by You as "Not a Contribution."
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of this Agreement, You grant to LogicLabs OÜ and to recipients of software distributed by LogicLabs OÜ a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, sublicense, and distribute Your Contributions and such derivative works.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of this Agreement, You grant to LogicLabs OÜ and to recipients of software distributed by LogicLabs OÜ a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by You that are necessarily infringed by Your Contribution(s) alone or by combination of Your Contribution(s) with the Work to which such Contribution(s) was submitted. If any entity institutes patent litigation against You or any other entity (including a cross-claim or counterclaim in a lawsuit) alleging that your Contribution, or the Work to which you have contributed, constitutes direct or contributory patent infringement, then any patent licenses granted to that entity under this Agreement for that Contribution or Work shall terminate as of the date such litigation is filed.
|
||||
|
||||
4. You represent that you are legally entitled to grant the above license. If your employer(s) has rights to intellectual property that you create that includes your Contributions, you represent that you have received permission to make Contributions on behalf of that employer, that your employer has waived such rights for your Contributions to LogicLabs OÜ, or that your employer has executed a separate Contributor License Agreement with LogicLabs OÜ.
|
||||
|
||||
5. You represent that each of Your Contributions is Your original creation (see section 7 for submissions on behalf of others). You represent that Your Contribution submissions include complete details of any third-party license or other restriction (including, but not limited to, related patents and trademarks) of which you are personally aware and which are associated with any part of Your Contributions.
|
||||
|
||||
6. You are not expected to provide support for Your Contributions, except to the extent You desire to provide support. Unless required by applicable law or agreed to in writing, You provide Your Contributions on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
|
||||
7. Should You wish to submit work that is not Your original creation, You may submit it to LogicLabs OÜ separately from any Contribution, identifying the complete details of its source and of any license or other restriction (including, but not limited to, related patents, trademarks, and license agreements) of which you are personally aware, and conspicuously marking the work as "Submitted on behalf of a third-party: [named here]".
|
||||
|
||||
8. You agree to notify LogicLabs OÜ of any facts or circumstances of which you become aware that would make these representations inaccurate in any respect.
|
||||
25
.github/CLA.md
vendored
Normal file
25
.github/CLA.md
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
# Contributor License Agreement (CLA)
|
||||
|
||||
This license is for your protection as a Contributor as well as the protection of the maintainers of the Open Archiver software; it does not change your rights to use your own Contributions for any other purpose. In the following, the maintainers of Open Archiver are referred to as "Open Archiver".
|
||||
|
||||
You accept and agree to the following terms and conditions for Your present and future Contributions submitted to "Open Archiver". Except for the license granted herein to Open Archiver and recipients of software distributed by "Open Archiver", You reserve all right, title, and interest in and to Your Contributions.
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"You" (or "Your") shall mean the copyright owner or legal entity authorized by the copyright owner that is making this Agreement with "Open Archiver". For legal entities, the entity making a Contribution and all other entities that control, are controlled by, or are under common control with that entity are considered to be a single Contributor.
|
||||
|
||||
"Contribution" shall mean any original work of authorship, including any modifications or additions to an existing work, that is intentionally submitted by You to "Open Archiver" for inclusion in, or documentation of, any of the products owned or managed by "Open Archiver" (the "Work"). For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to "Open Archiver" or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, "Open Archiver" for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by You as "Not a Contribution."
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of this Agreement, You grant to "Open Archiver" and to recipients of software distributed by "Open Archiver" a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, sublicense, and distribute Your Contributions and such derivative works.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of this Agreement, You grant to "Open Archiver" and to recipients of software distributed by "Open Archiver" a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by You that are necessarily infringed by Your Contribution(s) alone or by combination of Your Contribution(s) with the Work to which such Contribution(s) was submitted. If any entity institutes patent litigation against You or any other entity (including a cross-claim or counterclaim in a lawsuit) alleging that your Contribution, or the Work to which you have contributed, constitutes direct or contributory patent infringement, then any patent licenses granted to that entity under this Agreement for that Contribution or Work shall terminate as of the date such litigation is filed.
|
||||
|
||||
4. You represent that you are legally entitled to grant the above license. If your employer(s) has rights to intellectual property that you create that includes your Contributions, you represent that you have received permission to make Contributions on behalf of that employer, that your employer has waived such rights for your Contributions to "Open Archiver", or that your employer has executed a separate Contributor License Agreement with "Open Archiver".
|
||||
|
||||
5. You represent that each of Your Contributions is Your original creation (see section 7 for submissions on behalf of others). You represent that Your Contribution submissions include complete details of any third-party license or other restriction (including, but not limited to, related patents and trademarks) of which you are personally aware and which are associated with any part of Your Contributions.
|
||||
|
||||
6. You are not expected to provide support for Your Contributions, except to the extent You desire to provide support. Unless required by applicable law or agreed to in writing, You provide Your Contributions on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
|
||||
7. Should You wish to submit work that is not Your original creation, You may submit it to "Open Archiver" separately from any Contribution, identifying the complete details of its source and of any license or other restriction (including, but not limited to, related patents, trademarks, and license agreements) of which you are personally aware, and conspicuously marking the work as "Submitted on behalf of a third-party: [named here]".
|
||||
|
||||
8. You agree to notify "Open Archiver" of any facts or circumstances of which you become aware that would make these representations inaccurate in any respect.
|
||||
1
.github/FUNDING.yml
vendored
Normal file
1
.github/FUNDING.yml
vendored
Normal file
@@ -0,0 +1 @@
|
||||
github: [wayneshn]
|
||||
32
.github/workflows/cla.yml
vendored
Normal file
32
.github/workflows/cla.yml
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
name: 'CLA Assistant'
|
||||
on:
|
||||
issue_comment:
|
||||
types: [created]
|
||||
pull_request_target:
|
||||
types: [opened, closed, synchronize]
|
||||
|
||||
# explicitly configure permissions, in case your GITHUB_TOKEN workflow permissions are set to read-only in repository settings
|
||||
permissions:
|
||||
actions: write
|
||||
contents: write # this can be 'read' if the signatures are in remote repository
|
||||
pull-requests: write
|
||||
statuses: write
|
||||
|
||||
jobs:
|
||||
CLAAssistant:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: 'CLA Assistant'
|
||||
if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA') || github.event_name == 'pull_request_target'
|
||||
uses: contributor-assistant/github-action@v2.6.1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
|
||||
with:
|
||||
path-to-signatures: 'signatures/version2/cla.json'
|
||||
path-to-document: 'https://github.com/LogicLabs-OU/OpenArchiver/blob/main/.github/CLA-v2.md'
|
||||
branch: 'main'
|
||||
allowlist: 'wayneshn'
|
||||
|
||||
remote-organization-name: 'LogicLabs-OU'
|
||||
remote-repository-name: 'cla-db'
|
||||
43
.github/workflows/deploy-docs.yml
vendored
Normal file
43
.github/workflows/deploy-docs.yml
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
name: Deploy Docs to GitHub Pages
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
paths:
|
||||
- 'docs/**'
|
||||
|
||||
jobs:
|
||||
build-and-deploy:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup pnpm
|
||||
uses: pnpm/action-setup@v2
|
||||
with:
|
||||
version: 10.13.1
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: '22'
|
||||
cache: 'pnpm'
|
||||
|
||||
- name: Install dependencies
|
||||
run: pnpm install --frozen-lockfile
|
||||
|
||||
- name: Build documentation site
|
||||
run: pnpm docs:build
|
||||
|
||||
- name: Create CNAME file
|
||||
run: echo "docs.openarchiver.com" > docs/.vitepress/dist/CNAME
|
||||
|
||||
- name: Deploy to GitHub Pages
|
||||
uses: peaceiris/actions-gh-pages@v3
|
||||
with:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
publish_dir: docs/.vitepress/dist
|
||||
41
.github/workflows/docker-deployment.yml
vendored
Normal file
41
.github/workflows/docker-deployment.yml
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
name: docker-deployment
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
paths-ignore:
|
||||
- '**.md'
|
||||
- 'docs/**'
|
||||
|
||||
jobs:
|
||||
docker:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ vars.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Extract short SHA
|
||||
id: sha
|
||||
run: echo "sha=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
file: ./docker/Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: logiclabshq/open-archiver:${{ steps.sha.outputs.sha }}
|
||||
5
.gitignore
vendored
5
.gitignore
vendored
@@ -9,7 +9,6 @@ dist
|
||||
**/meili_data/
|
||||
|
||||
# PNPM
|
||||
pnpm-lock.yaml
|
||||
pnpm-debug.log
|
||||
|
||||
# IDE
|
||||
@@ -21,3 +20,7 @@ pnpm-debug.log
|
||||
|
||||
# Dev
|
||||
.dev
|
||||
|
||||
# Vitepress
|
||||
docs/.vitepress/dist
|
||||
docs/.vitepress/cache
|
||||
|
||||
13
.prettierignore
Normal file
13
.prettierignore
Normal file
@@ -0,0 +1,13 @@
|
||||
# Ignore artifacts
|
||||
dist
|
||||
.svelte-kit
|
||||
build
|
||||
node_modules
|
||||
pnpm-lock.yaml
|
||||
meili_data/
|
||||
|
||||
## shadcn installs
|
||||
packages/frontend/src/lib/components/ui/
|
||||
|
||||
# Ignore logs
|
||||
*.log
|
||||
@@ -1,12 +1,11 @@
|
||||
{
|
||||
"useTabs": true,
|
||||
"singleQuote": true,
|
||||
"trailingComma": "none",
|
||||
"trailingComma": "es5",
|
||||
"semi": true,
|
||||
"tabWidth": 4,
|
||||
"printWidth": 100,
|
||||
"plugins": [
|
||||
"prettier-plugin-svelte",
|
||||
"prettier-plugin-tailwindcss"
|
||||
],
|
||||
"plugins": ["prettier-plugin-svelte", "prettier-plugin-tailwindcss"],
|
||||
"overrides": [
|
||||
{
|
||||
"files": "*.svelte",
|
||||
@@ -16,24 +16,24 @@ We pledge to act and interact in ways that are welcoming, open, and respectful.
|
||||
Examples of behavior that contributes to a positive environment for our
|
||||
community include:
|
||||
|
||||
- Demonstrating empathy and kindness toward other people
|
||||
- Being respectful of differing opinions, viewpoints, and experiences
|
||||
- Giving and gracefully accepting constructive feedback
|
||||
- Accepting responsibility and apologizing to those affected by our mistakes,
|
||||
and learning from the experience
|
||||
- Focusing on what is best not just for us as individuals, but for the
|
||||
overall community
|
||||
- Demonstrating empathy and kindness toward other people
|
||||
- Being respectful of differing opinions, viewpoints, and experiences
|
||||
- Giving and gracefully accepting constructive feedback
|
||||
- Accepting responsibility and apologizing to those affected by our mistakes,
|
||||
and learning from the experience
|
||||
- Focusing on what is best not just for us as individuals, but for the
|
||||
overall community
|
||||
|
||||
Examples of unacceptable behavior include:
|
||||
|
||||
- The use of sexualized language or imagery, and sexual attention or
|
||||
advances of any kind
|
||||
- Trolling, insulting or derogatory comments, and personal or political attacks
|
||||
- Public or private harassment
|
||||
- Publishing others' private information, such as a physical or email
|
||||
address, without their explicit permission
|
||||
- Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
- The use of sexualized language or imagery, and sexual attention or
|
||||
advances of any kind
|
||||
- Trolling, insulting or derogatory comments, and personal or political attacks
|
||||
- Public or private harassment
|
||||
- Publishing others' private information, such as a physical or email
|
||||
address, without their explicit permission
|
||||
- Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Enforcement Responsibilities
|
||||
|
||||
|
||||
@@ -6,8 +6,8 @@ First off, thank you for considering contributing to Open Archiver! It's people
|
||||
|
||||
Not sure where to start? You can:
|
||||
|
||||
- Look through the [open issues](https://github.com/LogicLabs-OU/OpenArchiver/issues) for bugs or feature requests.
|
||||
- Check the issues labeled `good first issue` for tasks that are a good entry point into the codebase.
|
||||
- Look through the [open issues](https://github.com/LogicLabs-OU/OpenArchiver/issues) for bugs or feature requests.
|
||||
- Check the issues labeled `good first issue` for tasks that are a good entry point into the codebase.
|
||||
|
||||
## How to Contribute
|
||||
|
||||
@@ -29,6 +29,10 @@ If you have an idea for an enhancement, please open an issue to discuss it. This
|
||||
6. **Update the documentation** if your changes require it.
|
||||
7. **Submit a pull request** to the `main` branch of the main repository. Please provide a clear description of the problem and solution. Include the relevant issue number if applicable.
|
||||
|
||||
## Contributor License Agreement
|
||||
|
||||
By submitting a pull request to this repository, you agree to the terms and conditions of our [Contributor License Agreement](./.github/CLA.md)
|
||||
|
||||
## Code of Conduct
|
||||
|
||||
This project and everyone participating in it is governed by the [Open Archiver Code of Conduct](CODE_OF_CONDUCT.md). By participating, you are expected to uphold this code. Please report unacceptable behavior.
|
||||
@@ -37,13 +41,23 @@ This project and everyone participating in it is governed by the [Open Archiver
|
||||
|
||||
### Git Commit Messages
|
||||
|
||||
- Use the present tense ("Add feature" not "Added feature").
|
||||
- Use the imperative mood ("Move cursor to..." not "Moves cursor to...").
|
||||
- Limit the first line to 72 characters or less.
|
||||
- Reference issues and pull requests liberally after the first line.
|
||||
- Use the present tense ("Add feature" not "Added feature").
|
||||
- Use the imperative mood ("Move cursor to..." not "Moves cursor to...").
|
||||
- Limit the first line to 72 characters or less.
|
||||
- Reference issues and pull requests liberally after the first line.
|
||||
|
||||
### TypeScript Styleguide
|
||||
|
||||
- Follow the existing code style.
|
||||
- Use TypeScript's strict mode.
|
||||
- Avoid using `any` as a type. Define clear interfaces and types in the `packages/types` directory.
|
||||
- Follow the existing code style.
|
||||
- Use TypeScript's strict mode.
|
||||
- Avoid using `any` as a type. Define clear interfaces and types in the `packages/types` directory.
|
||||
|
||||
### Formatting
|
||||
|
||||
We use Prettier for code formatting. Before you commit new code, it is necessary to check code format by running this command from the root folder:
|
||||
|
||||
`pnpm run lint`
|
||||
|
||||
If there are any format issues, you can use the following command to fix them
|
||||
|
||||
`pnpm run format`
|
||||
|
||||
109
README.md
109
README.md
@@ -1,45 +1,76 @@
|
||||
# Open Archiver
|
||||
|
||||
> [!WARNING]
|
||||
> This project is currently in active development and has not yet reached a stable General Availability (GA) release. It is not recommended for use in production environments. We welcome contributions from the community to help us accelerate development and improve the platform. Please see our [Contributing](#contributing) section for more details.
|
||||
[](https://www.docker.com)
|
||||
[](https://www.postgresql.org/)
|
||||
[](https://www.meilisearch.com/)
|
||||
[](https://www.typescriptlang.org/)
|
||||
[](https://redis.io)
|
||||
[](https://svelte.dev/)
|
||||
|
||||
**A secure, sovereign, and affordable open-source platform for email archiving and eDiscovery.**
|
||||
**A secure, sovereign, and open-source platform for email archiving and eDiscovery.**
|
||||
|
||||
Open Archiver enables individuals and organizations to take control of their digital communication history. It provides a robust, self-hosted solution for ingesting, storing, indexing, and searching emails from major platforms, ensuring a permanent, tamper-proof record of your most critical data, free from vendor lock-in.
|
||||
Open Archiver provides a robust, self-hosted solution for archiving, storing, indexing, and searching emails from major platforms, including Google Workspace (Gmail), Microsoft 365, PST files, as well as generic IMAP-enabled email inboxes. Use Open Archiver to keep a permanent, tamper-proof record of your communication history, free from vendor lock-in.
|
||||
|
||||

|
||||
## 📸 Screenshots
|
||||
|
||||
## Vision
|
||||

|
||||
_Dashboard_
|
||||
|
||||
To provide individuals and organizations with a secure, sovereign, and affordable platform to preserve and access their digital communication history.
|
||||

|
||||
_Archived emails_
|
||||
|
||||
## Key Features
|
||||

|
||||
_Full-text search across all your emails and attachments_
|
||||
|
||||
- **Universal Ingestion**: Connect to Google Workspace, Microsoft 365, and standard IMAP servers to perform initial bulk imports and maintain continuous, real-time synchronization.
|
||||
- **Secure & Efficient Storage**: Emails are stored in the standard `.eml` format. The system uses deduplication and compression to minimize storage costs. All data is encrypted at rest.
|
||||
- **Pluggable Storage Backends**: Start with local filesystem storage and scale to S3-compatible object storage (like AWS S3 or MinIO) as your needs grow.
|
||||
- **Powerful Search & eDiscovery**: A high-performance search engine indexes the full text of emails and attachments (PDF, DOCX, etc.). The intuitive UI supports advanced search operators, filtering, and case management.
|
||||
- **Compliance & Retention**: Define granular retention policies to automatically manage the lifecycle of your data. Place legal holds on communications to prevent deletion during litigation.
|
||||
- **Comprehensive Auditing**: An immutable audit trail logs all system activities, ensuring you have a clear record of who accessed what and when.
|
||||
- **Role-Based Access Control (RBAC)**: Enforce the principle of least privilege with pre-defined roles for Administrators, Auditors, and End Users.
|
||||
## 👨👩👧👦 Join our community!
|
||||
|
||||
## Tech Stack
|
||||
We are committed to build an engaging community around Open Archiver, and we are inviting all of you to join our community on Discord to get real-time support and connect with the team.
|
||||
|
||||
[](https://discord.gg/MTtD7BhuTQ)
|
||||
|
||||
[](https://bsky.app/profile/openarchiver.bsky.social)
|
||||
|
||||
## 🚀 Live demo
|
||||
|
||||
Check out the live demo here: https://demo.openarchiver.com
|
||||
|
||||
Username: admin@local.com
|
||||
|
||||
Password: openarchiver_demo
|
||||
|
||||
## ✨ Key Features
|
||||
|
||||
- **Universal Ingestion**: Connect to any email provider to perform initial bulk imports and maintain continuous, real-time synchronization. Ingestion sources include:
|
||||
- IMAP connection
|
||||
- Google Workspace
|
||||
- Microsoft 365
|
||||
- PST files
|
||||
- Zipped .eml files
|
||||
|
||||
- **Secure & Efficient Storage**: Emails are stored in the standard `.eml` format. The system uses deduplication and compression to minimize storage costs. All data is encrypted at rest.
|
||||
- **Pluggable Storage Backends**: Support both local filesystem storage and S3-compatible object storage (like AWS S3 or MinIO).
|
||||
- **Powerful Search & eDiscovery**: A high-performance search engine indexes the full text of emails and attachments (PDF, DOCX, etc.).
|
||||
- **Thread discovery**: The ability to discover if an email belongs to a thread/conversation and present the context.
|
||||
- **Compliance & Retention**: Define granular retention policies to automatically manage the lifecycle of your data. Place legal holds on communications to prevent deletion during litigation (TBD).
|
||||
- **Comprehensive Auditing**: An immutable audit trail logs all system activities, ensuring you have a clear record of who accessed what and when (TBD).
|
||||
|
||||
## 🛠️ Tech Stack
|
||||
|
||||
Open Archiver is built on a modern, scalable, and maintainable technology stack:
|
||||
|
||||
- **Frontend**: SvelteKit with Svelte 5
|
||||
- **Backend**: Node.js with Express.js & TypeScript
|
||||
- **Job Queue**: BullMQ on Redis for robust, asynchronous processing
|
||||
- **Search Engine**: Meilisearch for blazingly fast and resource-efficient search
|
||||
- **Database**: PostgreSQL for metadata, user management, and audit logs
|
||||
- **Deployment**: Docker Compose for easy, one-command deployment
|
||||
- **Frontend**: SvelteKit with Svelte 5
|
||||
- **Backend**: Node.js with Express.js & TypeScript
|
||||
- **Job Queue**: BullMQ on Redis for robust, asynchronous processing. (We use Valkey as the Redis service in the Docker Compose deployment mode, but you can use Redis as well.)
|
||||
- **Search Engine**: Meilisearch for blazingly fast and resource-efficient search
|
||||
- **Database**: PostgreSQL for metadata, user management, and audit logs
|
||||
- **Deployment**: Docker Compose deployment
|
||||
|
||||
## Getting Started
|
||||
## 📦 Deployment
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/install/)
|
||||
- A server or local machine with at least 2GB of RAM.
|
||||
- [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/install/)
|
||||
- A server or local machine with at least 4GB of RAM (2GB of RAM if you use external Postgres, Redis (Valkey) and Meilisearch instances).
|
||||
|
||||
### Installation
|
||||
|
||||
@@ -47,7 +78,7 @@ Open Archiver is built on a modern, scalable, and maintainable technology stack:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/LogicLabs-OU/OpenArchiver.git
|
||||
cd open-archiver
|
||||
cd OpenArchiver
|
||||
```
|
||||
|
||||
2. **Configure your environment:**
|
||||
@@ -57,7 +88,7 @@ Open Archiver is built on a modern, scalable, and maintainable technology stack:
|
||||
cp .env.example .env
|
||||
```
|
||||
|
||||
You will need to edit the `.env` file to set your database passwords, secret keys, and other essential configuration.
|
||||
You will need to edit the `.env` file to set your admin passwords, secret keys, and other essential configuration. See the `.env.example` file for guidance on each setting.
|
||||
|
||||
3. **Run the application:**
|
||||
|
||||
@@ -65,21 +96,29 @@ Open Archiver is built on a modern, scalable, and maintainable technology stack:
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
This command will build the necessary Docker images and start all the services (frontend, backend, database, etc.) in the background.
|
||||
This command will pull the pre-built Docker images and start all the services (frontend, backend, database, etc.) in the background.
|
||||
|
||||
4. **Access the application:**
|
||||
Once the services are running, you can access the Open Archiver web interface by navigating to `http://localhost:3000` in your web browser.
|
||||
|
||||
## Contributing
|
||||
## ⚙️ Data Source Configuration
|
||||
|
||||
We welcome contributions from the community! Whether you're a developer, a designer, or just an enthusiast, there are many ways to get involved.
|
||||
After deploying the application, you will need to configure one or more ingestion sources to begin archiving emails. Follow our detailed guides to connect to your email provider:
|
||||
|
||||
- **Reporting Bugs**: If you find a bug, please open an issue on our GitHub repository.
|
||||
- **Suggesting Enhancements**: Have an idea for a new feature? We'd love to hear it. Open an issue to start the discussion.
|
||||
- **Code Contributions**: If you'd like to contribute code, please fork the repository and submit a pull request.
|
||||
- [Connecting to Google Workspace](https://docs.openarchiver.com/user-guides/email-providers/google-workspace.html)
|
||||
- [Connecting to Microsoft 365](https://docs.openarchiver.com/user-guides/email-providers/microsoft-365.html)
|
||||
- [Connecting to a Generic IMAP Server](https://docs.openarchiver.com/user-guides/email-providers/imap.html)
|
||||
|
||||
## 🤝 Contributing
|
||||
|
||||
We welcome contributions from the community!
|
||||
|
||||
- **Reporting Bugs**: If you find a bug, please open an issue on our GitHub repository.
|
||||
- **Suggesting Enhancements**: Have an idea for a new feature? We'd love to hear it. Open an issue to start the discussion.
|
||||
- **Code Contributions**: If you'd like to contribute code, please fork the repository and submit a pull request.
|
||||
|
||||
Please read our `CONTRIBUTING.md` file for more details on our code of conduct and the process for submitting pull requests.
|
||||
|
||||
## License
|
||||
## 📈 Star History
|
||||
|
||||
This project is licensed under the AGPL-3.0 License.
|
||||
[](https://www.star-history.com/#LogicLabs-OU/OpenArchiver&Date)
|
||||
|
||||
BIN
assets/screenshots/archived-emails.png
Normal file
BIN
assets/screenshots/archived-emails.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 305 KiB |
BIN
assets/screenshots/dashboard-1.png
Normal file
BIN
assets/screenshots/dashboard-1.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 120 KiB |
BIN
assets/screenshots/search.png
Normal file
BIN
assets/screenshots/search.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 199 KiB |
@@ -0,0 +1,67 @@
|
||||
# Docker Compose stack for Open Archiver.
# Services: the application container, PostgreSQL (metadata), Valkey
# (Redis-compatible job queue backend), and Meilisearch (full-text search).
# All services share a private bridge network; data is persisted in named volumes.
version: '3.8'

services:
  open-archiver:
    image: logiclabshq/open-archiver:latest
    container_name: open-archiver
    restart: unless-stopped
    ports:
      - '3000:3000' # Frontend
    env_file:
      - .env
    volumes:
      - archiver-data:/var/data/open-archiver
    depends_on:
      - postgres
      - valkey
      - meilisearch
    networks:
      - open-archiver-net

  postgres:
    image: postgres:17-alpine
    container_name: postgres
    restart: unless-stopped
    environment:
      # NOTE(review): weak fallback defaults below — operators should always
      # override these in .env for production deployments.
      POSTGRES_DB: ${POSTGRES_DB:-open_archive}
      POSTGRES_USER: ${POSTGRES_USER:-admin}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-password}
    volumes:
      - pgdata:/var/lib/postgresql/data
    networks:
      - open-archiver-net

  valkey:
    image: valkey/valkey:8-alpine
    container_name: valkey
    restart: unless-stopped
    # Password-protect the queue backend; REDIS_PASSWORD comes from .env.
    command: valkey-server --requirepass ${REDIS_PASSWORD}
    volumes:
      - valkeydata:/data
    networks:
      - open-archiver-net

  meilisearch:
    image: getmeili/meilisearch:v1.15
    container_name: meilisearch
    restart: unless-stopped
    environment:
      # NOTE(review): sample master key as fallback — override in .env.
      MEILI_MASTER_KEY: ${MEILI_MASTER_KEY:-aSampleMasterKey}
    volumes:
      - meilidata:/meili_data
    networks:
      - open-archiver-net

volumes:
  pgdata:
    driver: local
  valkeydata:
    driver: local
  meilidata:
    driver: local
  archiver-data:
    driver: local

networks:
  open-archiver-net:
    driver: bridge
|
||||
|
||||
@@ -1,11 +1,14 @@
|
||||
# Dockerfile for Open Archiver
|
||||
|
||||
# 1. Build Stage: Install all dependencies and build the project
|
||||
FROM node:22-alpine AS build
|
||||
ARG BASE_IMAGE=node:22-alpine
|
||||
|
||||
# 0. Base Stage: Define all common dependencies and setup
|
||||
FROM ${BASE_IMAGE} AS base
|
||||
WORKDIR /app
|
||||
|
||||
# Install pnpm
|
||||
RUN npm install -g pnpm
|
||||
RUN --mount=type=cache,target=/root/.npm \
|
||||
npm install -g pnpm
|
||||
|
||||
# Copy manifests and lockfile
|
||||
COPY package.json pnpm-workspace.yaml pnpm-lock.yaml* ./
|
||||
@@ -13,30 +16,24 @@ COPY packages/backend/package.json ./packages/backend/
|
||||
COPY packages/frontend/package.json ./packages/frontend/
|
||||
COPY packages/types/package.json ./packages/types/
|
||||
|
||||
# Install all dependencies
|
||||
RUN pnpm install --frozen-lockfile --prod=false
|
||||
# 1. Build Stage: Install all dependencies and build the project
|
||||
FROM base AS build
|
||||
COPY packages/frontend/svelte.config.js ./packages/frontend/
|
||||
|
||||
# Install all dependencies. Use --shamefully-hoist to create a flat node_modules structure
|
||||
ENV PNPM_HOME="/pnpm"
|
||||
RUN --mount=type=cache,id=pnpm,target=/pnpm/store \
|
||||
pnpm install --shamefully-hoist --frozen-lockfile --prod=false
|
||||
|
||||
# Copy the rest of the source code
|
||||
COPY . .
|
||||
|
||||
# Build all packages
|
||||
# Build all packages.
|
||||
RUN pnpm build
|
||||
|
||||
# 2. Production Stage: Install only production dependencies and copy built artifacts
|
||||
FROM node:22-alpine AS production
|
||||
WORKDIR /app
|
||||
FROM base AS production
|
||||
|
||||
# Install pnpm
|
||||
RUN npm install -g pnpm
|
||||
|
||||
# Copy manifests and lockfile
|
||||
COPY package.json pnpm-workspace.yaml pnpm-lock.yaml* ./
|
||||
COPY packages/backend/package.json ./packages/backend/
|
||||
COPY packages/frontend/package.json ./packages/frontend/
|
||||
COPY packages/types/package.json ./packages/types/
|
||||
|
||||
# Install only production dependencies
|
||||
RUN pnpm install --frozen-lockfile --prod
|
||||
|
||||
# Copy built application from build stage
|
||||
COPY --from=build /app/packages/backend/dist ./packages/backend/dist
|
||||
@@ -45,9 +42,15 @@ COPY --from=build /app/packages/types/dist ./packages/types/dist
|
||||
COPY --from=build /app/packages/backend/drizzle.config.ts ./packages/backend/drizzle.config.ts
|
||||
COPY --from=build /app/packages/backend/src/database/migrations ./packages/backend/src/database/migrations
|
||||
|
||||
# Copy the entrypoint script and make it executable
|
||||
COPY docker/docker-entrypoint.sh /usr/local/bin/
|
||||
|
||||
# Expose the port the app runs on
|
||||
EXPOSE 4000
|
||||
EXPOSE 3000
|
||||
|
||||
# Set the entrypoint
|
||||
ENTRYPOINT ["docker-entrypoint.sh"]
|
||||
|
||||
# Start the application
|
||||
CMD ["pnpm", "docker-start"]
|
||||
|
||||
17
docker/docker-entrypoint.sh
Executable file
17
docker/docker-entrypoint.sh
Executable file
@@ -0,0 +1,17 @@
|
||||
#!/bin/sh
# Container entrypoint for Open Archiver.
# Prepares runtime dependencies and the database schema, then hands
# control over to the container's CMD.

# Abort immediately if any command fails.
set -e

# Re-install production dependencies inside the running container so that
# native addons are built for this machine's architecture. This matters for
# multi-platform images: without it, addons compiled on a different build
# architecture trigger "exec format error" at runtime.
pnpm install --frozen-lockfile --prod

# Apply pending database migrations before the application boots, so the
# server never starts against an out-of-date schema.
pnpm db:migrate

# Replace this shell with the main container command (keeps it as PID 1
# so it receives signals directly).
exec "$@"
|
||||
96
docs/.vitepress/config.mts
Normal file
96
docs/.vitepress/config.mts
Normal file
@@ -0,0 +1,96 @@
|
||||
import { defineConfig } from 'vitepress';
|
||||
|
||||
export default defineConfig({
|
||||
head: [
|
||||
[
|
||||
'script',
|
||||
{
|
||||
defer: '',
|
||||
src: 'https://analytics.zenceipt.com/script.js',
|
||||
'data-website-id': '2c8b452e-eab5-4f82-8ead-902d8f8b976f',
|
||||
},
|
||||
],
|
||||
['link', { rel: 'icon', href: '/logo-sq.svg' }],
|
||||
],
|
||||
title: 'Open Archiver Docs',
|
||||
description: 'Official documentation for the Open Archiver project.',
|
||||
themeConfig: {
|
||||
search: {
|
||||
provider: 'local',
|
||||
},
|
||||
logo: {
|
||||
src: '/logo-sq.svg',
|
||||
},
|
||||
nav: [
|
||||
{ text: 'Home', link: '/' },
|
||||
{ text: 'Github', link: 'https://github.com/LogicLabs-OU/OpenArchiver' },
|
||||
{ text: 'Website', link: 'https://openarchiver.com/' },
|
||||
{ text: 'Discord', link: 'https://discord.gg/MTtD7BhuTQ' },
|
||||
],
|
||||
sidebar: [
|
||||
{
|
||||
text: 'User Guides',
|
||||
items: [
|
||||
{ text: 'Get Started', link: '/' },
|
||||
{ text: 'Installation', link: '/user-guides/installation' },
|
||||
{
|
||||
text: 'Email Providers',
|
||||
link: '/user-guides/email-providers/',
|
||||
collapsed: true,
|
||||
items: [
|
||||
{
|
||||
text: 'Generic IMAP Server',
|
||||
link: '/user-guides/email-providers/imap',
|
||||
},
|
||||
{
|
||||
text: 'Google Workspace',
|
||||
link: '/user-guides/email-providers/google-workspace',
|
||||
},
|
||||
{
|
||||
text: 'Microsoft 365',
|
||||
link: '/user-guides/email-providers/microsoft-365',
|
||||
},
|
||||
{ text: 'EML Import', link: '/user-guides/email-providers/eml' },
|
||||
{ text: 'PST Import', link: '/user-guides/email-providers/pst' },
|
||||
],
|
||||
},
|
||||
{
|
||||
text: 'Settings',
|
||||
collapsed: true,
|
||||
items: [
|
||||
{
|
||||
text: 'System',
|
||||
link: '/user-guides/settings/system',
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
text: 'API Reference',
|
||||
items: [
|
||||
{ text: 'Overview', link: '/api/' },
|
||||
{ text: 'Authentication', link: '/api/authentication' },
|
||||
{ text: 'Rate Limiting', link: '/api/rate-limiting' },
|
||||
{ text: 'Auth', link: '/api/auth' },
|
||||
{ text: 'Archived Email', link: '/api/archived-email' },
|
||||
{ text: 'Dashboard', link: '/api/dashboard' },
|
||||
{ text: 'Ingestion', link: '/api/ingestion' },
|
||||
{ text: 'Search', link: '/api/search' },
|
||||
{ text: 'Storage', link: '/api/storage' },
|
||||
],
|
||||
},
|
||||
{
|
||||
text: 'Services',
|
||||
items: [
|
||||
{ text: 'Overview', link: '/services/' },
|
||||
{ text: 'Storage Service', link: '/services/storage-service' },
|
||||
{
|
||||
text: 'IAM Service',
|
||||
items: [{ text: 'IAM Policies', link: '/services/iam-service/iam-policy' }],
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
},
|
||||
});
|
||||
17
docs/SUMMARY.md
Normal file
17
docs/SUMMARY.md
Normal file
@@ -0,0 +1,17 @@
|
||||
# Table of contents
|
||||
|
||||
## User guides
|
||||
|
||||
- [Get started](index.md)
|
||||
- [Installation](user-guides/installation.md)
|
||||
- [Email Providers](user-guides/email-providers/index.md)
|
||||
- [Connecting to Google Workspace](user-guides/email-providers/google-workspace.md)
|
||||
- [Connecting to a Generic IMAP Server](user-guides/email-providers/imap.md)
|
||||
- [Connecting to Microsoft 365](user-guides/email-providers/microsoft-365.md)
|
||||
|
||||
---
|
||||
|
||||
- [api](api/index.md)
|
||||
- [Ingestion Sources API Documentation](api/ingestion.md)
|
||||
- [services](services/index.md)
|
||||
- [Pluggable Storage Service (StorageService)](services/storage-service.md)
|
||||
107
docs/api/archived-email.md
Normal file
107
docs/api/archived-email.md
Normal file
@@ -0,0 +1,107 @@
|
||||
# Archived Email Service API
|
||||
|
||||
The Archived Email Service is responsible for retrieving archived emails and their details from the database and storage.
|
||||
|
||||
## Endpoints
|
||||
|
||||
All endpoints in this service require authentication.
|
||||
|
||||
### GET /api/v1/archived-emails/ingestion-source/:ingestionSourceId
|
||||
|
||||
Retrieves a paginated list of archived emails for a specific ingestion source.
|
||||
|
||||
**Access:** Authenticated
|
||||
|
||||
#### URL Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| :------------------ | :----- | :------------------------------------------------ |
|
||||
| `ingestionSourceId` | string | The ID of the ingestion source to get emails for. |
|
||||
|
||||
#### Query Parameters
|
||||
|
||||
| Parameter | Type | Description | Default |
|
||||
| :-------- | :----- | :------------------------------ | :------ |
|
||||
| `page` | number | The page number for pagination. | 1 |
|
||||
| `limit` | number | The number of items per page. | 10 |
|
||||
|
||||
#### Responses
|
||||
|
||||
- **200 OK:** A paginated list of archived emails.
|
||||
|
||||
```json
|
||||
{
|
||||
"items": [
|
||||
{
|
||||
"id": "email-id",
|
||||
"subject": "Test Email",
|
||||
"from": "sender@example.com",
|
||||
"sentAt": "2023-10-27T10:00:00.000Z",
|
||||
"hasAttachments": true,
|
||||
"recipients": [{ "name": "Recipient 1", "email": "recipient1@example.com" }]
|
||||
}
|
||||
],
|
||||
"total": 100,
|
||||
"page": 1,
|
||||
"limit": 10
|
||||
}
|
||||
```
|
||||
|
||||
- **500 Internal Server Error:** An unexpected error occurred.
|
||||
|
||||
### GET /api/v1/archived-emails/:id
|
||||
|
||||
Retrieves a single archived email by its ID, including its raw content and attachments.
|
||||
|
||||
**Access:** Authenticated
|
||||
|
||||
#### URL Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| :-------- | :----- | :---------------------------- |
|
||||
| `id` | string | The ID of the archived email. |
|
||||
|
||||
#### Responses
|
||||
|
||||
- **200 OK:** The archived email details.
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "email-id",
|
||||
"subject": "Test Email",
|
||||
"from": "sender@example.com",
|
||||
"sentAt": "2023-10-27T10:00:00.000Z",
|
||||
"hasAttachments": true,
|
||||
"recipients": [{ "name": "Recipient 1", "email": "recipient1@example.com" }],
|
||||
"raw": "...",
|
||||
"attachments": [
|
||||
{
|
||||
"id": "attachment-id",
|
||||
"filename": "document.pdf",
|
||||
"mimeType": "application/pdf",
|
||||
"sizeBytes": 12345
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
- **404 Not Found:** The archived email with the specified ID was not found.
|
||||
- **500 Internal Server Error:** An unexpected error occurred.
|
||||
|
||||
## Service Methods
|
||||
|
||||
### `getArchivedEmails(ingestionSourceId: string, page: number, limit: number): Promise<PaginatedArchivedEmails>`
|
||||
|
||||
Retrieves a paginated list of archived emails from the database for a given ingestion source.
|
||||
|
||||
- **ingestionSourceId:** The ID of the ingestion source.
|
||||
- **page:** The page number for pagination.
|
||||
- **limit:** The number of items per page.
|
||||
- **Returns:** A promise that resolves to a `PaginatedArchivedEmails` object.
|
||||
|
||||
### `getArchivedEmailById(emailId: string): Promise<ArchivedEmail | null>`
|
||||
|
||||
Retrieves a single archived email by its ID, including its raw content and attachments.
|
||||
|
||||
- **emailId:** The ID of the archived email.
|
||||
- **Returns:** A promise that resolves to an `ArchivedEmail` object or `null` if not found.
|
||||
84
docs/api/auth.md
Normal file
84
docs/api/auth.md
Normal file
@@ -0,0 +1,84 @@
|
||||
# Auth Service API
|
||||
|
||||
The Auth Service is responsible for handling user authentication, including login and token verification.
|
||||
|
||||
## Endpoints
|
||||
|
||||
### POST /api/v1/auth/login
|
||||
|
||||
Authenticates a user and returns a JWT if the credentials are valid.
|
||||
|
||||
**Access:** Public
|
||||
|
||||
**Rate Limiting:** This endpoint is rate-limited to prevent brute-force attacks.
|
||||
|
||||
#### Request Body
|
||||
|
||||
| Field | Type | Description |
|
||||
| :--------- | :----- | :------------------------ |
|
||||
| `email` | string | The user's email address. |
|
||||
| `password` | string | The user's password. |
|
||||
|
||||
#### Responses
|
||||
|
||||
- **200 OK:** Authentication successful.
|
||||
|
||||
```json
|
||||
{
|
||||
"accessToken": "your.jwt.token",
|
||||
"user": {
|
||||
"id": "user-id",
|
||||
"email": "user@example.com",
|
||||
"role": "user"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- **400 Bad Request:** Email or password not provided.
|
||||
|
||||
```json
|
||||
{
|
||||
"message": "Email and password are required"
|
||||
}
|
||||
```
|
||||
|
||||
- **401 Unauthorized:** Invalid credentials.
|
||||
|
||||
```json
|
||||
{
|
||||
"message": "Invalid credentials"
|
||||
}
|
||||
```
|
||||
|
||||
- **500 Internal Server Error:** An unexpected error occurred.
|
||||
|
||||
```json
|
||||
{
|
||||
"message": "An internal server error occurred"
|
||||
}
|
||||
```
|
||||
|
||||
## Service Methods
|
||||
|
||||
### `verifyPassword(password: string, hash: string): Promise<boolean>`
|
||||
|
||||
Compares a plain-text password with a hashed password to verify its correctness.
|
||||
|
||||
- **password:** The plain-text password.
|
||||
- **hash:** The hashed password to compare against.
|
||||
- **Returns:** A promise that resolves to `true` if the password is valid, otherwise `false`.
|
||||
|
||||
### `login(email: string, password: string): Promise<LoginResponse | null>`
|
||||
|
||||
Handles the user login process. It finds the user by email, verifies the password, and generates a JWT upon successful authentication.
|
||||
|
||||
- **email:** The user's email.
|
||||
- **password:** The user's password.
|
||||
- **Returns:** A promise that resolves to a `LoginResponse` object containing the `accessToken` and `user` details, or `null` if authentication fails.
|
||||
|
||||
### `verifyToken(token: string): Promise<AuthTokenPayload | null>`
|
||||
|
||||
Verifies the authenticity and expiration of a JWT.
|
||||
|
||||
- **token:** The JWT string to verify.
|
||||
- **Returns:** A promise that resolves to the token's `AuthTokenPayload` if valid, otherwise `null`.
|
||||
25
docs/api/authentication.md
Normal file
25
docs/api/authentication.md
Normal file
@@ -0,0 +1,25 @@
|
||||
# API Authentication
|
||||
|
||||
To access protected API endpoints, you need to include a user-generated API key in the `X-API-KEY` header of your requests.
|
||||
|
||||
## 1. Creating an API Key
|
||||
|
||||
You can create, manage, and view your API keys through the application's user interface.
|
||||
|
||||
1. Navigate to **Settings > API Keys** in the dashboard.
|
||||
2. Click the **"Generate API Key"** button.
|
||||
3. Provide a descriptive name for your key and select an expiration period.
|
||||
4. The new API key will be displayed. **Copy this key immediately and store it in a secure location. You will not be able to see it again.**
|
||||
|
||||
## 2. Making Authenticated Requests
|
||||
|
||||
Once you have your API key, you must include it in the `X-API-KEY` header of all subsequent requests to protected API endpoints.
|
||||
|
||||
**Example:**
|
||||
|
||||
```http
|
||||
GET /api/v1/dashboard/stats
|
||||
X-API-KEY: a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2
|
||||
```
|
||||
|
||||
If the API key is missing, expired, or invalid, the API will respond with a `401 Unauthorized` status code.
|
||||
114
docs/api/dashboard.md
Normal file
114
docs/api/dashboard.md
Normal file
@@ -0,0 +1,114 @@
|
||||
# Dashboard Service API
|
||||
|
||||
The Dashboard Service provides endpoints for retrieving statistics and data for the main dashboard.
|
||||
|
||||
## Endpoints
|
||||
|
||||
All endpoints in this service require authentication.
|
||||
|
||||
### GET /api/v1/dashboard/stats
|
||||
|
||||
Retrieves overall statistics, including the total number of archived emails, total storage used, and the number of failed ingestions in the last 7 days.
|
||||
|
||||
**Access:** Authenticated
|
||||
|
||||
#### Responses
|
||||
|
||||
- **200 OK:** An object containing the dashboard statistics.
|
||||
|
||||
```json
|
||||
{
|
||||
"totalEmailsArchived": 12345,
|
||||
"totalStorageUsed": 54321098,
|
||||
"failedIngestionsLast7Days": 3
|
||||
}
|
||||
```
|
||||
|
||||
### GET /api/v1/dashboard/ingestion-history
|
||||
|
||||
Retrieves the email ingestion history for the last 30 days, grouped by day.
|
||||
|
||||
**Access:** Authenticated
|
||||
|
||||
#### Responses
|
||||
|
||||
- **200 OK:** An object containing the ingestion history.
|
||||
|
||||
```json
|
||||
{
|
||||
"history": [
|
||||
{
|
||||
"date": "2023-09-27T00:00:00.000Z",
|
||||
"count": 150
|
||||
},
|
||||
{
|
||||
"date": "2023-09-28T00:00:00.000Z",
|
||||
"count": 200
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### GET /api/v1/dashboard/ingestion-sources
|
||||
|
||||
Retrieves a list of all ingestion sources along with their status and storage usage.
|
||||
|
||||
**Access:** Authenticated
|
||||
|
||||
#### Responses
|
||||
|
||||
- **200 OK:** An array of ingestion source objects.
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"id": "source-id-1",
|
||||
"name": "Google Workspace",
|
||||
"provider": "google",
|
||||
"status": "active",
|
||||
"storageUsed": 12345678
|
||||
},
|
||||
{
|
||||
"id": "source-id-2",
|
||||
"name": "Microsoft 365",
|
||||
"provider": "microsoft",
|
||||
"status": "error",
|
||||
"storageUsed": 87654321
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
### GET /api/v1/dashboard/recent-syncs
|
||||
|
||||
Retrieves a list of recent synchronization jobs. (Note: This is currently a placeholder and will return an empty array).
|
||||
|
||||
**Access:** Authenticated
|
||||
|
||||
#### Responses
|
||||
|
||||
- **200 OK:** An empty array.
|
||||
|
||||
```json
|
||||
[]
|
||||
```
|
||||
|
||||
### GET /api/v1/dashboard/indexed-insights
|
||||
|
||||
Retrieves insights from the indexed email data, such as the top senders.
|
||||
|
||||
**Access:** Authenticated
|
||||
|
||||
#### Responses
|
||||
|
||||
- **200 OK:** An object containing indexed insights.
|
||||
|
||||
```json
|
||||
{
|
||||
"topSenders": [
|
||||
{
|
||||
"sender": "user@example.com",
|
||||
"count": 42
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
18
docs/api/index.md
Normal file
18
docs/api/index.md
Normal file
@@ -0,0 +1,18 @@
|
||||
# API Overview
|
||||
|
||||
Welcome to the Open Archiver API documentation. This section provides detailed information about the available API endpoints.
|
||||
|
||||
All API endpoints are prefixed with `/api/v1`.
|
||||
|
||||
## Authentication
|
||||
|
||||
Before making requests to protected endpoints, you must authenticate with the API. See the [Authentication Guide](./authentication.md) for details on how to obtain and use API tokens.
|
||||
|
||||
## API Services
|
||||
|
||||
- [**Auth Service**](./auth.md): Handles user authentication.
|
||||
- [**Archived Email Service**](./archived-email.md): Manages archived emails.
|
||||
- [**Dashboard Service**](./dashboard.md): Provides data for the main dashboard.
|
||||
- [**Ingestion Service**](./ingestion.md): Manages email ingestion sources.
|
||||
- [**Search Service**](./search.md): Handles email search functionality.
|
||||
- [**Storage Service**](./storage.md): Manages file storage and downloads.
|
||||
@@ -1,206 +1,162 @@
|
||||
# Ingestion Sources API Documentation
|
||||
# Ingestion Service API
|
||||
|
||||
A comprehensive guide to using the Ingestion Sources API.
|
||||
The Ingestion Service manages ingestion sources, which are configurations for connecting to email providers and importing emails.
|
||||
|
||||
**Base Path:** `/v1/ingestion-sources`
|
||||
## Endpoints
|
||||
|
||||
---
|
||||
All endpoints in this service require authentication.
|
||||
|
||||
## Authentication
|
||||
### POST /api/v1/ingestion-sources
|
||||
|
||||
All endpoints in this API are protected and require authentication. Requests must include an `Authorization` header containing a valid Bearer token. This can be a JWT obtained from the login endpoint or a `SUPER_API_KEY` for administrative tasks.
|
||||
Creates a new ingestion source.
|
||||
|
||||
**Header Example:**
|
||||
`Authorization: Bearer <YOUR_JWT_OR_SUPER_API_KEY>`
|
||||
**Access:** Authenticated
|
||||
|
||||
---
|
||||
#### Request Body
|
||||
|
||||
## Core Concepts
|
||||
The request body should be a `CreateIngestionSourceDto` object.
|
||||
|
||||
### Ingestion Providers
|
||||
|
||||
The `provider` field determines the type of email source. Each provider requires a different configuration object, for example:
|
||||
|
||||
- `google_workspace`: For connecting to Google Workspace accounts via OAuth 2.0.
|
||||
- `microsoft_365`: For connecting to Microsoft 365 accounts via OAuth 2.0.
|
||||
- `generic_imap`: For connecting to any email server that supports IMAP.
|
||||
|
||||
### Ingestion Status
|
||||
|
||||
The `status` field tracks the state of the ingestion source.
|
||||
|
||||
- `pending_auth`: The source has been created but requires user authorization (OAuth flow).
|
||||
- `active`: The source is authenticated and ready to sync.
|
||||
- `syncing`: An import job is currently in progress.
|
||||
- `importing`: initial syncing in progress
|
||||
- `paused`: The source is temporarily disabled.
|
||||
- `error`: An error occurred during the last sync.
|
||||
|
||||
---
|
||||
|
||||
## 1. Create Ingestion Source
|
||||
|
||||
- **Method:** `POST`
|
||||
- **Path:** `/`
|
||||
- **Description:** Registers a new source for email ingestion. The `providerConfig` will vary based on the selected `provider`.
|
||||
|
||||
#### Request Body (`CreateIngestionSourceDto`)
|
||||
|
||||
- `name` (string, required): A user-friendly name for the source (e.g., "Marketing Department G-Suite").
|
||||
- `provider` (string, required): One of `google_workspace`, `microsoft_365`, or `generic_imap`.
|
||||
- `providerConfig` (object, required): Configuration specific to the provider.
|
||||
|
||||
##### `providerConfig` for `google_workspace` / `microsoft_365`
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "Corporate Google Workspace",
|
||||
"provider": "google_workspace",
|
||||
"providerConfig": {
|
||||
"clientId": "your-oauth-client-id.apps.googleusercontent.com",
|
||||
"clientSecret": "your-super-secret-client-secret",
|
||||
"redirectUri": "https://yourapp.com/oauth/google/callback"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
##### `providerConfig` for `generic_imap`
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "Legacy IMAP Server",
|
||||
"provider": "generic_imap",
|
||||
"providerConfig": {
|
||||
"host": "imap.example.com",
|
||||
"port": 993,
|
||||
"secure": true,
|
||||
"username": "archive-user",
|
||||
"password": "imap-password"
|
||||
}
|
||||
```typescript
|
||||
interface CreateIngestionSourceDto {
|
||||
name: string;
|
||||
provider: 'google' | 'microsoft' | 'generic_imap';
|
||||
providerConfig: IngestionCredentials;
|
||||
}
|
||||
```
|
||||
|
||||
#### Responses
|
||||
|
||||
- **Success (`201 Created`):** Returns the full `IngestionSource` object, which now includes a system-generated `id` and default status.
|
||||
- **201 Created:** The newly created ingestion source.
|
||||
- **500 Internal Server Error:** An unexpected error occurred.
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "a1b2c3d4-e5f6-7890-1234-567890abcdef",
|
||||
"name": "Corporate Google Workspace",
|
||||
"provider": "google_workspace",
|
||||
"status": "pending_auth",
|
||||
"createdAt": "2025-07-11T12:00:00.000Z",
|
||||
"updatedAt": "2025-07-11T12:00:00.000Z",
|
||||
"providerConfig": { ... }
|
||||
}
|
||||
```
|
||||
### GET /api/v1/ingestion-sources
|
||||
|
||||
- **Error (`500 Internal Server Error`):** Indicates a server-side problem during creation.
|
||||
Retrieves all ingestion sources.
|
||||
|
||||
---
|
||||
|
||||
## 2. Get All Ingestion Sources
|
||||
|
||||
- **Method:** `GET`
|
||||
- **Path:** `/`
|
||||
- **Description:** Retrieves a list of all configured ingestion sources for the organization.
|
||||
**Access:** Authenticated
|
||||
|
||||
#### Responses
|
||||
|
||||
- **Success (`200 OK`):** Returns an array of `IngestionSource` objects.
|
||||
- **200 OK:** An array of ingestion source objects.
|
||||
- **500 Internal Server Error:** An unexpected error occurred.
|
||||
|
||||
- **Error (`500 Internal Server Error`):** Indicates a server-side problem.
|
||||
### GET /api/v1/ingestion-sources/:id
|
||||
|
||||
---
|
||||
Retrieves a single ingestion source by its ID.
|
||||
|
||||
## 3. Get Ingestion Source by ID
|
||||
|
||||
- **Method:** `GET`
|
||||
- **Path:** `/:id`
|
||||
- **Description:** Fetches the details of a specific ingestion source.
|
||||
**Access:** Authenticated
|
||||
|
||||
#### URL Parameters
|
||||
|
||||
- `id` (string, required): The UUID of the ingestion source.
|
||||
| Parameter | Type | Description |
|
||||
| :-------- | :----- | :------------------------------ |
|
||||
| `id` | string | The ID of the ingestion source. |
|
||||
|
||||
#### Responses
|
||||
|
||||
- **Success (`200 OK`):** Returns the corresponding `IngestionSource` object.
|
||||
- **200 OK:** The ingestion source object.
|
||||
- **404 Not Found:** Ingestion source not found.
|
||||
- **500 Internal Server Error:** An unexpected error occurred.
|
||||
|
||||
- **Error (`404 Not Found`):** Returned if no source with the given ID exists.
|
||||
- **Error (`500 Internal Server Error`):** Indicates a server-side problem.
|
||||
### PUT /api/v1/ingestion-sources/:id
|
||||
|
||||
---
|
||||
Updates an existing ingestion source.
|
||||
|
||||
## 4. Update Ingestion Source
|
||||
|
||||
- **Method:** `PUT`
|
||||
- **Path:** `/:id`
|
||||
- **Description:** Modifies an existing ingestion source. This is useful for changing the name, pausing a source, or updating its configuration.
|
||||
**Access:** Authenticated
|
||||
|
||||
#### URL Parameters
|
||||
|
||||
- `id` (string, required): The UUID of the ingestion source to update.
|
||||
| Parameter | Type | Description |
|
||||
| :-------- | :----- | :------------------------------ |
|
||||
| `id` | string | The ID of the ingestion source. |
|
||||
|
||||
#### Request Body (`UpdateIngestionSourceDto`)
|
||||
#### Request Body
|
||||
|
||||
All fields are optional. Only include the fields you want to change.
|
||||
The request body should be an `UpdateIngestionSourceDto` object.
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "Marketing Dept G-Suite (Paused)",
|
||||
"status": "paused"
|
||||
```typescript
|
||||
interface UpdateIngestionSourceDto {
|
||||
name?: string;
|
||||
provider?: 'google' | 'microsoft' | 'generic_imap';
|
||||
providerConfig?: IngestionCredentials;
|
||||
status?: 'pending_auth' | 'auth_success' | 'importing' | 'active' | 'paused' | 'error';
|
||||
}
|
||||
```
|
||||
|
||||
#### Responses
|
||||
|
||||
- **Success (`200 OK`):** Returns the complete, updated `IngestionSource` object.
|
||||
- **200 OK:** The updated ingestion source object.
|
||||
- **404 Not Found:** Ingestion source not found.
|
||||
- **500 Internal Server Error:** An unexpected error occurred.
|
||||
|
||||
- **Error (`404 Not Found`):** Returned if no source with the given ID exists.
|
||||
- **Error (`500 Internal Server Error`):** Indicates a server-side problem.
|
||||
### DELETE /api/v1/ingestion-sources/:id
|
||||
|
||||
---
|
||||
Deletes an ingestion source and all associated data.
|
||||
|
||||
## 5. Delete Ingestion Source
|
||||
|
||||
- **Method:** `DELETE`
|
||||
- **Path:** `/:id`
|
||||
- **Description:** Permanently removes an ingestion source. This action cannot be undone.
|
||||
**Access:** Authenticated
|
||||
|
||||
#### URL Parameters
|
||||
|
||||
- `id` (string, required): The UUID of the ingestion source to delete.
|
||||
| Parameter | Type | Description |
|
||||
| :-------- | :----- | :------------------------------ |
|
||||
| `id` | string | The ID of the ingestion source. |
|
||||
|
||||
#### Responses
|
||||
|
||||
- **Success (`204 No Content`):** Indicates successful deletion with no body content.
|
||||
- **204 No Content:** The ingestion source was deleted successfully.
|
||||
- **404 Not Found:** Ingestion source not found.
|
||||
- **500 Internal Server Error:** An unexpected error occurred.
|
||||
|
||||
- **Error (`404 Not Found`):** Returned if no source with the given ID exists.
|
||||
- **Error (`500 Internal Server Error`):** Indicates a server-side problem.
|
||||
### POST /api/v1/ingestion-sources/:id/import
|
||||
|
||||
---
|
||||
Triggers the initial import process for an ingestion source.
|
||||
|
||||
## 6. Trigger Initial Import
|
||||
|
||||
- **Method:** `POST`
|
||||
- **Path:** `/:id/import`
|
||||
- **Description:** Initiates the email import process for a given source. This is an asynchronous operation that enqueues a background job and immediately returns a response. The status of the source will be updated to `importing`.
|
||||
**Access:** Authenticated
|
||||
|
||||
#### URL Parameters
|
||||
|
||||
- `id` (string, required): The UUID of the ingestion source to sync.
|
||||
| Parameter | Type | Description |
|
||||
| :-------- | :----- | :------------------------------ |
|
||||
| `id` | string | The ID of the ingestion source. |
|
||||
|
||||
#### Responses
|
||||
|
||||
- **Success (`202 Accepted`):** Confirms that the sync request has been accepted for processing.
|
||||
- **202 Accepted:** The initial import was triggered successfully.
|
||||
- **404 Not Found:** Ingestion source not found.
|
||||
- **500 Internal Server Error:** An unexpected error occurred.
|
||||
|
||||
```json
|
||||
{
|
||||
"message": "Initial import triggered successfully."
|
||||
}
|
||||
```
|
||||
### POST /api/v1/ingestion-sources/:id/pause
|
||||
|
||||
- **Error (`404 Not Found`):** Returned if no source with the given ID exists.
|
||||
- **Error (`500 Internal Server Error`):** Indicates a server-side problem.
|
||||
Pauses an active ingestion source.
|
||||
|
||||
**Access:** Authenticated
|
||||
|
||||
#### URL Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| :-------- | :----- | :------------------------------ |
|
||||
| `id` | string | The ID of the ingestion source. |
|
||||
|
||||
#### Responses
|
||||
|
||||
- **200 OK:** The updated ingestion source object with a `paused` status.
|
||||
- **404 Not Found:** Ingestion source not found.
|
||||
- **500 Internal Server Error:** An unexpected error occurred.
|
||||
|
||||
### POST /api/v1/ingestion-sources/:id/sync
|
||||
|
||||
Triggers a forced synchronization for an ingestion source.
|
||||
|
||||
**Access:** Authenticated
|
||||
|
||||
#### URL Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| :-------- | :----- | :------------------------------ |
|
||||
| `id` | string | The ID of the ingestion source. |
|
||||
|
||||
#### Responses
|
||||
|
||||
- **202 Accepted:** The force sync was triggered successfully.
|
||||
- **404 Not Found:** Ingestion source not found.
|
||||
- **500 Internal Server Error:** An unexpected error occurred.
|
||||
|
||||
51
docs/api/rate-limiting.md
Normal file
51
docs/api/rate-limiting.md
Normal file
@@ -0,0 +1,51 @@
|
||||
# Rate Limiting
|
||||
|
||||
The API implements rate limiting as a security measure to protect your instance from denial-of-service (DoS) and brute-force attacks. This is a crucial feature for maintaining the security and stability of the application.
|
||||
|
||||
## How It Works
|
||||
|
||||
The rate limiter restricts the number of requests an IP address can make within a specific time frame. These limits are configurable via environment variables to suit your security needs.
|
||||
|
||||
By default, the limits are:
|
||||
|
||||
- **100 requests** per **1 minute** per IP address.
|
||||
|
||||
If this limit is exceeded, the API will respond with an HTTP `429 Too Many Requests` status code.
|
||||
|
||||
### Response Body
|
||||
|
||||
When an IP address is rate-limited, the API will return a JSON response with the following format:
|
||||
|
||||
```json
|
||||
{
|
||||
"status": 429,
|
||||
"message": "Too many requests from this IP, please try again after 15 minutes"
|
||||
}
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
You can customize the rate-limiting settings by setting the following environment variables in your `.env` file:
|
||||
|
||||
- `RATE_LIMIT_WINDOW_MS`: The time window in milliseconds. Defaults to `60000` (1 minute).
|
||||
- `RATE_LIMIT_MAX_REQUESTS`: The maximum number of requests allowed per IP address within the time window. Defaults to `100`.
|
||||
|
||||
## Handling Rate Limits
|
||||
|
||||
If you are developing a client that interacts with the API, you should handle rate limiting gracefully:
|
||||
|
||||
1. **Check the Status Code**: Monitor for a `429` HTTP status code in responses.
|
||||
2. **Implement a Retry Mechanism**: When you receive a `429` response, it is best practice to wait before retrying the request. Implementing an exponential backoff strategy is recommended.
|
||||
3. **Check Headers**: The response will include the following standard headers to help you manage your request rate:
|
||||
- `RateLimit-Limit`: The maximum number of requests allowed in the current window.
|
||||
- `RateLimit-Remaining`: The number of requests you have left in the current window.
|
||||
- `RateLimit-Reset`: The time when the rate limit window will reset, in UTC epoch seconds.
|
||||
|
||||
## Excluded Endpoints
|
||||
|
||||
Certain essential endpoints are excluded from rate limiting to ensure the application's UI remains responsive. These are:
|
||||
|
||||
- `/auth/status`
|
||||
- `/settings/system`
|
||||
|
||||
These endpoints can be called as needed without affecting your rate limit count.
|
||||
50
docs/api/search.md
Normal file
50
docs/api/search.md
Normal file
@@ -0,0 +1,50 @@
|
||||
# Search Service API
|
||||
|
||||
The Search Service provides an endpoint for searching indexed emails.
|
||||
|
||||
## Endpoints
|
||||
|
||||
All endpoints in this service require authentication.
|
||||
|
||||
### GET /api/v1/search
|
||||
|
||||
Performs a search query against the indexed emails.
|
||||
|
||||
**Access:** Authenticated
|
||||
|
||||
#### Query Parameters
|
||||
|
||||
| Parameter | Type | Description | Default |
|
||||
| :----------------- | :----- | :--------------------------------------------------------------------- | :------ |
|
||||
| `keywords` | string | The search query. | |
|
||||
| `page` | number | The page number for pagination. | 1 |
|
||||
| `limit` | number | The number of items per page. | 10 |
|
||||
| `matchingStrategy` | string | The matching strategy to use (`all` or `last`). | `last` |
|
||||
| `filters` | object | Key-value pairs for filtering results (e.g., `from=user@example.com`). | |
|
||||
|
||||
#### Responses
|
||||
|
||||
- **200 OK:** A search result object.
|
||||
|
||||
```json
|
||||
{
|
||||
"hits": [
|
||||
{
|
||||
"id": "email-id",
|
||||
"subject": "Test Email",
|
||||
"from": "sender@example.com",
|
||||
"_formatted": {
|
||||
"subject": "<em>Test</em> Email"
|
||||
}
|
||||
}
|
||||
],
|
||||
"total": 1,
|
||||
"page": 1,
|
||||
"limit": 10,
|
||||
"totalPages": 1,
|
||||
"processingTimeMs": 5
|
||||
}
|
||||
```
|
||||
|
||||
- **400 Bad Request:** Keywords are required.
|
||||
- **500 Internal Server Error:** An unexpected error occurred.
|
||||
26
docs/api/storage.md
Normal file
26
docs/api/storage.md
Normal file
@@ -0,0 +1,26 @@
|
||||
# Storage Service API
|
||||
|
||||
The Storage Service provides an endpoint for downloading files from the configured storage provider.
|
||||
|
||||
## Endpoints
|
||||
|
||||
All endpoints in this service require authentication.
|
||||
|
||||
### GET /api/v1/storage/download
|
||||
|
||||
Downloads a file from the storage.
|
||||
|
||||
**Access:** Authenticated
|
||||
|
||||
#### Query Parameters
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| :-------- | :----- | :------------------------------------------------ |
|
||||
| `path` | string | The path to the file within the storage provider. |
|
||||
|
||||
#### Responses
|
||||
|
||||
- **200 OK:** The file stream.
|
||||
- **400 Bad Request:** File path is required or invalid.
|
||||
- **404 Not Found:** File not found.
|
||||
- **500 Internal Server Error:** An unexpected error occurred.
|
||||
42
docs/index.md
Normal file
42
docs/index.md
Normal file
@@ -0,0 +1,42 @@
|
||||
# Get Started 👋
|
||||
|
||||
Welcome to Open Archiver! This guide will help you get started with setting up and using the platform.
|
||||
|
||||
## What is Open Archiver? 🛡️
|
||||
|
||||
**A secure, sovereign, and affordable open-source platform for email archiving and eDiscovery.**
|
||||
|
||||
Open Archiver provides a robust, self-hosted solution for archiving, storing, indexing, and searching emails from major platforms, including Google Workspace (Gmail), Microsoft 365, as well as generic IMAP-enabled email inboxes. Use Open Archiver to keep a permanent, tamper-proof record of your communication history, free from vendor lock-in.
|
||||
|
||||
## Key Features ✨
|
||||
|
||||
- **Universal Ingestion**: Connect to Google Workspace, Microsoft 365, and standard IMAP servers to perform initial bulk imports and maintain continuous, real-time synchronization.
|
||||
- **Secure & Efficient Storage**: Emails are stored in the standard `.eml` format. The system uses deduplication and compression to minimize storage costs. All data is encrypted at rest.
|
||||
- **Pluggable Storage Backends**: Support both local filesystem storage and S3-compatible object storage (like AWS S3 or MinIO).
|
||||
- **Powerful Search & eDiscovery**: A high-performance search engine indexes the full text of emails and attachments (PDF, DOCX, etc.).
|
||||
- **Compliance & Retention**: Define granular retention policies to automatically manage the lifecycle of your data. Place legal holds on communications to prevent deletion during litigation (TBD).
|
||||
- **Comprehensive Auditing**: An immutable audit trail logs all system activities, ensuring you have a clear record of who accessed what and when (TBD).
|
||||
|
||||
## Installation 🚀
|
||||
|
||||
To get your own instance of Open Archiver running, follow our detailed installation guide:
|
||||
|
||||
- [Installation Guide](./user-guides/installation.md)
|
||||
|
||||
## Data Source Configuration 🔌
|
||||
|
||||
After deploying the application, you will need to configure one or more ingestion sources to begin archiving emails. Follow our detailed guides to connect to your email provider:
|
||||
|
||||
- [Connecting to Google Workspace](./user-guides/email-providers/google-workspace.md)
|
||||
- [Connecting to Microsoft 365](./user-guides/email-providers/microsoft-365.md)
|
||||
- [Connecting to a Generic IMAP Server](./user-guides/email-providers/imap.md)
|
||||
|
||||
## Contributing ❤️
|
||||
|
||||
We welcome contributions from the community!
|
||||
|
||||
- **Reporting Bugs**: If you find a bug, please open an issue on our GitHub repository.
|
||||
- **Suggesting Enhancements**: Have an idea for a new feature? We'd love to hear it. Open an issue to start the discussion.
|
||||
- **Code Contributions**: If you'd like to contribute code, please fork the repository and submit a pull request.
|
||||
|
||||
Please read our `CONTRIBUTING.md` file for more details on our code of conduct and the process for submitting pull requests.
|
||||
19
docs/public/logo-sq.svg
Normal file
19
docs/public/logo-sq.svg
Normal file
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 164 KiB |
289
docs/services/iam-service.md
Normal file
289
docs/services/iam-service.md
Normal file
@@ -0,0 +1,289 @@
|
||||
# IAM Policies
|
||||
|
||||
This document provides a guide to creating and managing IAM policies in Open Archiver. It is intended for developers and administrators who need to configure granular access control for users and roles.
|
||||
|
||||
## Policy Structure
|
||||
|
||||
IAM policies are defined as an array of JSON objects, where each object represents a single permission rule. The structure of a policy object is as follows:
|
||||
|
||||
```json
|
||||
{
|
||||
"action": "read" OR ["read", "create"],
|
||||
"subject": "ingestion" OR ["ingestion", "dashboard"],
|
||||
"conditions": {
|
||||
"field_name": "value"
|
||||
},
|
||||
	"inverted": false OR true
|
||||
}
|
||||
```
|
||||
|
||||
- `action`: The action(s) to be performed on the subject. Can be a single string or an array of strings.
|
||||
- `subject`: The resource(s) or entity on which the action is to be performed. Can be a single string or an array of strings.
|
||||
- `conditions`: (Optional) A set of conditions that must be met for the permission to be granted.
|
||||
- `inverted`: (Optional) When set to `true`, this inverts the rule, turning it from a "can" rule into a "cannot" rule. This is useful for creating exceptions to broader permissions.
|
||||
|
||||
## Actions
|
||||
|
||||
The following actions are available for use in IAM policies:
|
||||
|
||||
- `manage`: A wildcard action that grants all permissions on a subject (`create`, `read`, `update`, `delete`, `search`, `sync`).
|
||||
- `create`: Allows the user to create a new resource.
|
||||
- `read`: Allows the user to view a resource.
|
||||
- `update`: Allows the user to modify an existing resource.
|
||||
- `delete`: Allows the user to delete a resource.
|
||||
- `search`: Allows the user to search for resources.
|
||||
- `sync`: Allows the user to synchronize a resource.
|
||||
|
||||
## Subjects
|
||||
|
||||
The following subjects are available for use in IAM policies:
|
||||
|
||||
- `all`: A wildcard subject that represents all resources.
|
||||
- `archive`: Represents archived emails.
|
||||
- `ingestion`: Represents ingestion sources.
|
||||
- `settings`: Represents system settings.
|
||||
- `users`: Represents user accounts.
|
||||
- `roles`: Represents user roles.
|
||||
- `dashboard`: Represents the dashboard.
|
||||
|
||||
## Advanced Conditions with MongoDB-Style Queries
|
||||
|
||||
Conditions are the key to creating fine-grained access control rules. They are defined as a JSON object where each key represents a field on the subject, and the value defines the criteria for that field.
|
||||
|
||||
All conditions within a single rule are implicitly joined with an **AND** logic. This means that for a permission to be granted, the resource must satisfy _all_ specified conditions.
|
||||
|
||||
The power of this system comes from its use of a subset of [MongoDB's query language](https://www.mongodb.com/docs/manual/), which provides a flexible and expressive way to define complex rules. These rules are translated into native queries for both the PostgreSQL database (via Drizzle ORM) and the Meilisearch engine.
|
||||
|
||||
### Supported Operators and Examples
|
||||
|
||||
Here is a detailed breakdown of the supported operators with examples.
|
||||
|
||||
#### `$eq` (Equal)
|
||||
|
||||
This is the default operator. If you provide a simple key-value pair, it is treated as an equality check.
|
||||
|
||||
```json
|
||||
// This rule...
|
||||
{ "status": "active" }
|
||||
|
||||
// ...is equivalent to this:
|
||||
{ "status": { "$eq": "active" } }
|
||||
```
|
||||
|
||||
**Use Case**: Grant access to an ingestion source only if its status is `active`.
|
||||
|
||||
#### `$ne` (Not Equal)
|
||||
|
||||
Matches documents where the field value is not equal to the specified value.
|
||||
|
||||
```json
|
||||
{ "provider": { "$ne": "pst_import" } }
|
||||
```
|
||||
|
||||
**Use Case**: Allow a user to see all ingestion sources except for PST imports.
|
||||
|
||||
#### `$in` (In Array)
|
||||
|
||||
Matches documents where the field value is one of the values in the specified array.
|
||||
|
||||
```json
|
||||
{
|
||||
"id": {
|
||||
"$in": ["INGESTION_ID_1", "INGESTION_ID_2"]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Use Case**: Grant an auditor access to a specific list of ingestion sources.
|
||||
|
||||
#### `$nin` (Not In Array)
|
||||
|
||||
Matches documents where the field value is not one of the values in the specified array.
|
||||
|
||||
```json
|
||||
{ "provider": { "$nin": ["pst_import", "eml_import"] } }
|
||||
```
|
||||
|
||||
**Use Case**: Hide all manual import sources from a specific user role.
|
||||
|
||||
#### `$lt` / `$lte` (Less Than / Less Than or Equal)
|
||||
|
||||
Matches documents where the field value is less than (`$lt`) or less than or equal to (`$lte`) the specified value. This is useful for numeric or date-based comparisons.
|
||||
|
||||
```json
|
||||
{ "sentAt": { "$lt": "2024-01-01T00:00:00.000Z" } }
|
||||
```
|
||||
|
||||
#### `$gt` / `$gte` (Greater Than / Greater Than or Equal)
|
||||
|
||||
Matches documents where the field value is greater than (`$gt`) or greater than or equal to (`$gte`) the specified value.
|
||||
|
||||
```json
|
||||
{ "sentAt": { "$gt": "2024-01-01T00:00:00.000Z" } }
|
||||
```
|
||||
|
||||
#### `$exists`
|
||||
|
||||
Matches documents that have (or do not have) the specified field.
|
||||
|
||||
```json
|
||||
// Grant access only if a 'lastSyncStatusMessage' exists
|
||||
{ "lastSyncStatusMessage": { "$exists": true } }
|
||||
```
|
||||
|
||||
## Inverted Rules: Creating Exceptions with `cannot`
|
||||
|
||||
By default, all rules are "can" rules, meaning they grant permissions. However, you can create a "cannot" rule by adding `"inverted": true` to a policy object. This is extremely useful for creating exceptions to broader permissions.
|
||||
|
||||
A common pattern is to grant broad access and then use an inverted rule to carve out a specific restriction.
|
||||
|
||||
**Use Case**: Grant a user access to all ingestion sources _except_ for one specific source.
|
||||
|
||||
This is achieved with two rules:
|
||||
|
||||
1. A "can" rule that grants `read` access to the `ingestion` subject.
|
||||
2. An inverted "cannot" rule that denies `read` access for the specific ingestion `id`.
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"action": "read",
|
||||
"subject": "ingestion"
|
||||
},
|
||||
{
|
||||
"inverted": true,
|
||||
"action": "read",
|
||||
"subject": "ingestion",
|
||||
"conditions": {
|
||||
"id": "SPECIFIC_INGESTION_ID_TO_EXCLUDE"
|
||||
}
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
## Policy Evaluation Logic
|
||||
|
||||
The system evaluates policies by combining all relevant rules for a user. The logic is simple:
|
||||
|
||||
- A user has permission if at least one `can` rule allows it.
|
||||
- A permission is denied if a `cannot` (`"inverted": true`) rule explicitly forbids it, even if a `can` rule allows it. `cannot` rules always take precedence.
|
||||
|
||||
### Dynamic Policies with Placeholders
|
||||
|
||||
To create dynamic policies that are specific to the current user, you can use the `${user.id}` placeholder in the `conditions` object. This placeholder will be replaced with the ID of the current user at runtime.
|
||||
|
||||
## Special Permissions for User and Role Management
|
||||
|
||||
It is important to note that while `read` access to `users` and `roles` can be granted granularly, any actions that modify these resources (`create`, `update`, `delete`) are restricted to Super Admins.
|
||||
|
||||
A user must have the `{ "action": "manage", "subject": "all" }` permission (Typically a Super Admin role) to manage users and roles. This is a security measure to prevent unauthorized changes to user accounts and permissions.
|
||||
|
||||
## Policy Examples
|
||||
|
||||
Here are several examples based on the default roles in the system, demonstrating how to combine actions, subjects, and conditions to achieve specific access control scenarios.
|
||||
|
||||
### Administrator
|
||||
|
||||
This policy grants a user full access to all resources using wildcards.
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"action": "manage",
|
||||
"subject": "all"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
### End-User
|
||||
|
||||
This policy allows a user to view the dashboard, create new ingestion sources, and fully manage the ingestion sources they own.
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"action": "read",
|
||||
"subject": "dashboard"
|
||||
},
|
||||
{
|
||||
"action": "create",
|
||||
"subject": "ingestion"
|
||||
},
|
||||
{
|
||||
"action": "manage",
|
||||
"subject": "ingestion",
|
||||
"conditions": {
|
||||
"userId": "${user.id}"
|
||||
}
|
||||
},
|
||||
{
|
||||
"action": "manage",
|
||||
"subject": "archive",
|
||||
"conditions": {
|
||||
"ingestionSource.userId": "${user.id}" // also needs to give permission to archived emails created by the user
|
||||
}
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
### Global Read-Only Auditor
|
||||
|
||||
This policy grants read and search access across most of the application's resources, making it suitable for an auditor who needs to view data without modifying it.
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"action": ["read", "search"],
|
||||
"subject": ["ingestion", "archive", "dashboard", "users", "roles"]
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
### Ingestion Admin
|
||||
|
||||
This policy grants full control over all ingestion sources and archives, but no other resources.
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"action": "manage",
|
||||
"subject": "ingestion"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
### Auditor for Specific Ingestion Sources
|
||||
|
||||
This policy demonstrates how to grant access to a specific list of ingestion sources using the `$in` operator.
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"action": ["read", "search"],
|
||||
"subject": "ingestion",
|
||||
"conditions": {
|
||||
"id": {
|
||||
"$in": ["INGESTION_ID_1", "INGESTION_ID_2"]
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
### Limit Access to a Specific Mailbox
|
||||
|
||||
This policy grants a user access to a specific ingestion source, but only allows them to see emails belonging to a single user within that source.
|
||||
|
||||
This is achieved with a single `can` rule: it grants `read` and `search` access to the `archive` subject, but only for emails whose `userEmail` matches the specified value.
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"action": ["read", "search"],
|
||||
"subject": "archive",
|
||||
"conditions": {
|
||||
"userEmail": "user1@example.com"
|
||||
}
|
||||
}
|
||||
]
|
||||
```
|
||||
289
docs/services/iam-service/iam-policy.md
Normal file
289
docs/services/iam-service/iam-policy.md
Normal file
@@ -0,0 +1,289 @@
|
||||
# IAM Policy
|
||||
|
||||
This document provides a guide to creating and managing IAM policies in Open Archiver. It is intended for developers and administrators who need to configure granular access control for users and roles.
|
||||
|
||||
## Policy Structure
|
||||
|
||||
IAM policies are defined as an array of JSON objects, where each object represents a single permission rule. The structure of a policy object is as follows:
|
||||
|
||||
```json
|
||||
{
|
||||
"action": "read" OR ["read", "create"],
|
||||
"subject": "ingestion" OR ["ingestion", "dashboard"],
|
||||
"conditions": {
|
||||
"field_name": "value"
|
||||
},
|
||||
	"inverted": false OR true
|
||||
}
|
||||
```
|
||||
|
||||
- `action`: The action(s) to be performed on the subject. Can be a single string or an array of strings.
|
||||
- `subject`: The resource(s) or entity on which the action is to be performed. Can be a single string or an array of strings.
|
||||
- `conditions`: (Optional) A set of conditions that must be met for the permission to be granted.
|
||||
- `inverted`: (Optional) When set to `true`, this inverts the rule, turning it from a "can" rule into a "cannot" rule. This is useful for creating exceptions to broader permissions.
|
||||
|
||||
## Actions
|
||||
|
||||
The following actions are available for use in IAM policies:
|
||||
|
||||
- `manage`: A wildcard action that grants all permissions on a subject (`create`, `read`, `update`, `delete`, `search`, `sync`).
|
||||
- `create`: Allows the user to create a new resource.
|
||||
- `read`: Allows the user to view a resource.
|
||||
- `update`: Allows the user to modify an existing resource.
|
||||
- `delete`: Allows the user to delete a resource.
|
||||
- `search`: Allows the user to search for resources.
|
||||
- `sync`: Allows the user to synchronize a resource.
|
||||
|
||||
## Subjects
|
||||
|
||||
The following subjects are available for use in IAM policies:
|
||||
|
||||
- `all`: A wildcard subject that represents all resources.
|
||||
- `archive`: Represents archived emails.
|
||||
- `ingestion`: Represents ingestion sources.
|
||||
- `settings`: Represents system settings.
|
||||
- `users`: Represents user accounts.
|
||||
- `roles`: Represents user roles.
|
||||
- `dashboard`: Represents the dashboard.
|
||||
|
||||
## Advanced Conditions with MongoDB-Style Queries
|
||||
|
||||
Conditions are the key to creating fine-grained access control rules. They are defined as a JSON object where each key represents a field on the subject, and the value defines the criteria for that field.
|
||||
|
||||
All conditions within a single rule are implicitly joined with an **AND** logic. This means that for a permission to be granted, the resource must satisfy _all_ specified conditions.
|
||||
|
||||
The power of this system comes from its use of a subset of [MongoDB's query language](https://www.mongodb.com/docs/manual/), which provides a flexible and expressive way to define complex rules. These rules are translated into native queries for both the PostgreSQL database (via Drizzle ORM) and the Meilisearch engine.
|
||||
|
||||
### Supported Operators and Examples
|
||||
|
||||
Here is a detailed breakdown of the supported operators with examples.
|
||||
|
||||
#### `$eq` (Equal)
|
||||
|
||||
This is the default operator. If you provide a simple key-value pair, it is treated as an equality check.
|
||||
|
||||
```json
|
||||
// This rule...
|
||||
{ "status": "active" }
|
||||
|
||||
// ...is equivalent to this:
|
||||
{ "status": { "$eq": "active" } }
|
||||
```
|
||||
|
||||
**Use Case**: Grant access to an ingestion source only if its status is `active`.
|
||||
|
||||
#### `$ne` (Not Equal)
|
||||
|
||||
Matches documents where the field value is not equal to the specified value.
|
||||
|
||||
```json
|
||||
{ "provider": { "$ne": "pst_import" } }
|
||||
```
|
||||
|
||||
**Use Case**: Allow a user to see all ingestion sources except for PST imports.
|
||||
|
||||
#### `$in` (In Array)
|
||||
|
||||
Matches documents where the field value is one of the values in the specified array.
|
||||
|
||||
```json
|
||||
{
|
||||
"id": {
|
||||
"$in": ["INGESTION_ID_1", "INGESTION_ID_2"]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Use Case**: Grant an auditor access to a specific list of ingestion sources.
|
||||
|
||||
#### `$nin` (Not In Array)
|
||||
|
||||
Matches documents where the field value is not one of the values in the specified array.
|
||||
|
||||
```json
|
||||
{ "provider": { "$nin": ["pst_import", "eml_import"] } }
|
||||
```
|
||||
|
||||
**Use Case**: Hide all manual import sources from a specific user role.
|
||||
|
||||
#### `$lt` / `$lte` (Less Than / Less Than or Equal)
|
||||
|
||||
Matches documents where the field value is less than (`$lt`) or less than or equal to (`$lte`) the specified value. This is useful for numeric or date-based comparisons.
|
||||
|
||||
```json
|
||||
{ "sentAt": { "$lt": "2024-01-01T00:00:00.000Z" } }
|
||||
```
|
||||
|
||||
#### `$gt` / `$gte` (Greater Than / Greater Than or Equal)
|
||||
|
||||
Matches documents where the field value is greater than (`$gt`) or greater than or equal to (`$gte`) the specified value.
|
||||
|
||||
```json
|
||||
{ "sentAt": { "$gt": "2024-01-01T00:00:00.000Z" } }
|
||||
```
|
||||
|
||||
#### `$exists`
|
||||
|
||||
Matches documents that have (or do not have) the specified field.
|
||||
|
||||
```json
|
||||
// Grant access only if a 'lastSyncStatusMessage' exists
|
||||
{ "lastSyncStatusMessage": { "$exists": true } }
|
||||
```
|
||||
|
||||
## Inverted Rules: Creating Exceptions with `cannot`
|
||||
|
||||
By default, all rules are "can" rules, meaning they grant permissions. However, you can create a "cannot" rule by adding `"inverted": true` to a policy object. This is extremely useful for creating exceptions to broader permissions.
|
||||
|
||||
A common pattern is to grant broad access and then use an inverted rule to carve out a specific restriction.
|
||||
|
||||
**Use Case**: Grant a user access to all ingestion sources _except_ for one specific source.
|
||||
|
||||
This is achieved with two rules:
|
||||
|
||||
1. A "can" rule that grants `read` access to the `ingestion` subject.
|
||||
2. An inverted "cannot" rule that denies `read` access for the specific ingestion `id`.
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"action": "read",
|
||||
"subject": "ingestion"
|
||||
},
|
||||
{
|
||||
"inverted": true,
|
||||
"action": "read",
|
||||
"subject": "ingestion",
|
||||
"conditions": {
|
||||
"id": "SPECIFIC_INGESTION_ID_TO_EXCLUDE"
|
||||
}
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
## Policy Evaluation Logic
|
||||
|
||||
The system evaluates policies by combining all relevant rules for a user. The logic is simple:
|
||||
|
||||
- A user has permission if at least one `can` rule allows it.
|
||||
- A permission is denied if a `cannot` (`"inverted": true`) rule explicitly forbids it, even if a `can` rule allows it. `cannot` rules always take precedence.
|
||||
|
||||
### Dynamic Policies with Placeholders
|
||||
|
||||
To create dynamic policies that are specific to the current user, you can use the `${user.id}` placeholder in the `conditions` object. This placeholder will be replaced with the ID of the current user at runtime.
|
||||
|
||||
## Special Permissions for User and Role Management
|
||||
|
||||
It is important to note that while `read` access to `users` and `roles` can be granted granularly, any actions that modify these resources (`create`, `update`, `delete`) are restricted to Super Admins.
|
||||
|
||||
A user must have the `{ "action": "manage", "subject": "all" }` permission (typically a Super Admin role) to manage users and roles. This is a security measure to prevent unauthorized changes to user accounts and permissions.
|
||||
|
||||
## Policy Examples
|
||||
|
||||
Here are several examples based on the default roles in the system, demonstrating how to combine actions, subjects, and conditions to achieve specific access control scenarios.
|
||||
|
||||
### Administrator
|
||||
|
||||
This policy grants a user full access to all resources using wildcards.
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"action": "manage",
|
||||
"subject": "all"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
### End-User
|
||||
|
||||
This policy allows a user to view the dashboard, create new ingestion sources, and fully manage the ingestion sources they own.
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"action": "read",
|
||||
"subject": "dashboard"
|
||||
},
|
||||
{
|
||||
"action": "create",
|
||||
"subject": "ingestion"
|
||||
},
|
||||
{
|
||||
"action": "manage",
|
||||
"subject": "ingestion",
|
||||
"conditions": {
|
||||
"userId": "${user.id}"
|
||||
}
|
||||
},
|
||||
{
|
||||
"action": "manage",
|
||||
"subject": "archive",
|
||||
"conditions": {
|
||||
"ingestionSource.userId": "${user.id}" // also needs to give permission to archived emails created by the user
|
||||
}
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
### Global Read-Only Auditor
|
||||
|
||||
This policy grants read and search access across most of the application's resources, making it suitable for an auditor who needs to view data without modifying it.
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"action": ["read", "search"],
|
||||
"subject": ["ingestion", "archive", "dashboard", "users", "roles"]
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
### Ingestion Admin
|
||||
|
||||
This policy grants full control over all ingestion sources, but no other resources.
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"action": "manage",
|
||||
"subject": "ingestion"
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
### Auditor for Specific Ingestion Sources
|
||||
|
||||
This policy demonstrates how to grant access to a specific list of ingestion sources using the `$in` operator.
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"action": ["read", "search"],
|
||||
"subject": "ingestion",
|
||||
"conditions": {
|
||||
"id": {
|
||||
"$in": ["INGESTION_ID_1", "INGESTION_ID_2"]
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
### Limit Access to a Specific Mailbox
|
||||
|
||||
This policy grants a user access to a specific ingestion source, but only allows them to see emails belonging to a single user within that source.
|
||||
|
||||
This is achieved with a single `can` rule: the rule grants `read` and `search` access to the `archive` subject, but only where the `userEmail` matches.
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"action": ["read", "search"],
|
||||
"subject": "archive",
|
||||
"conditions": {
|
||||
"userEmail": "user1@example.com"
|
||||
}
|
||||
}
|
||||
]
|
||||
```
|
||||
1
docs/services/index.md
Normal file
1
docs/services/index.md
Normal file
@@ -0,0 +1 @@
|
||||
# services
|
||||
@@ -14,8 +14,8 @@ The `StorageService` is configured via environment variables in the `.env` file.
|
||||
|
||||
The `STORAGE_TYPE` variable determines which provider the service will use.
|
||||
|
||||
- `STORAGE_TYPE=local`: Uses the local server's filesystem.
|
||||
- `STORAGE_TYPE=s3`: Uses an S3-compatible object storage service (e.g., AWS S3, MinIO, Google Cloud Storage).
|
||||
- `STORAGE_TYPE=local`: Uses the local server's filesystem.
|
||||
- `STORAGE_TYPE=s3`: Uses an S3-compatible object storage service (e.g., AWS S3, MinIO, Google Cloud Storage).
|
||||
|
||||
### 2. Local Filesystem Configuration
|
||||
|
||||
@@ -27,7 +27,7 @@ STORAGE_TYPE=local
|
||||
STORAGE_LOCAL_ROOT_PATH=/var/data/open-archiver
|
||||
```
|
||||
|
||||
- `STORAGE_LOCAL_ROOT_PATH`: The absolute path on the server where the archive will be created. The service will create subdirectories within this path as needed.
|
||||
- `STORAGE_LOCAL_ROOT_PATH`: The absolute path on the server where the archive will be created. The service will create subdirectories within this path as needed.
|
||||
|
||||
### 3. S3-Compatible Storage Configuration
|
||||
|
||||
@@ -44,12 +44,12 @@ STORAGE_S3_REGION=us-east-1
|
||||
STORAGE_S3_FORCE_PATH_STYLE=true
|
||||
```
|
||||
|
||||
- `STORAGE_S3_ENDPOINT`: The full URL of the S3 API endpoint.
|
||||
- `STORAGE_S3_BUCKET`: The name of the bucket to use for storage.
|
||||
- `STORAGE_S3_ACCESS_KEY_ID`: The access key for your S3 user.
|
||||
- `STORAGE_S3_SECRET_ACCESS_KEY`: The secret key for your S3 user.
|
||||
- `STORAGE_S3_REGION` (Optional): The AWS region of your bucket. Recommended for AWS S3.
|
||||
- `STORAGE_S3_FORCE_PATH_STYLE` (Optional): Set to `true` when using non-AWS S3 services like MinIO.
|
||||
- `STORAGE_S3_ENDPOINT`: The full URL of the S3 API endpoint.
|
||||
- `STORAGE_S3_BUCKET`: The name of the bucket to use for storage.
|
||||
- `STORAGE_S3_ACCESS_KEY_ID`: The access key for your S3 user.
|
||||
- `STORAGE_S3_SECRET_ACCESS_KEY`: The secret key for your S3 user.
|
||||
- `STORAGE_S3_REGION` (Optional): The AWS region of your bucket. Recommended for AWS S3.
|
||||
- `STORAGE_S3_FORCE_PATH_STYLE` (Optional): Set to `true` when using non-AWS S3 services like MinIO.
|
||||
|
||||
## How to Use the Service
|
||||
|
||||
@@ -61,31 +61,27 @@ The `StorageService` is designed to be used via dependency injection in other se
|
||||
import { StorageService } from './StorageService';
|
||||
|
||||
class IngestionService {
|
||||
private storageService: StorageService;
|
||||
private storageService: StorageService;
|
||||
|
||||
constructor() {
|
||||
// The StorageService is instantiated without any arguments.
|
||||
// It automatically reads the configuration from the environment.
|
||||
this.storageService = new StorageService();
|
||||
}
|
||||
constructor() {
|
||||
// The StorageService is instantiated without any arguments.
|
||||
// It automatically reads the configuration from the environment.
|
||||
this.storageService = new StorageService();
|
||||
}
|
||||
|
||||
public async archiveEmail(
|
||||
rawEmail: Buffer,
|
||||
userId: string,
|
||||
messageId: string
|
||||
): Promise<void> {
|
||||
// Define a structured, unique path for the email.
|
||||
const archivePath = `${userId}/messages/${messageId}.eml`;
|
||||
public async archiveEmail(rawEmail: Buffer, userId: string, messageId: string): Promise<void> {
|
||||
// Define a structured, unique path for the email.
|
||||
const archivePath = `${userId}/messages/${messageId}.eml`;
|
||||
|
||||
try {
|
||||
// Use the service. It doesn't know or care if this is writing
|
||||
// to a local disk or an S3 bucket.
|
||||
await this.storageService.put(archivePath, rawEmail);
|
||||
console.log(`Successfully archived email to ${archivePath}`);
|
||||
} catch (error) {
|
||||
console.error(`Failed to archive email ${messageId}`, error);
|
||||
}
|
||||
}
|
||||
try {
|
||||
// Use the service. It doesn't know or care if this is writing
|
||||
// to a local disk or an S3 bucket.
|
||||
await this.storageService.put(archivePath, rawEmail);
|
||||
console.log(`Successfully archived email to ${archivePath}`);
|
||||
} catch (error) {
|
||||
console.error(`Failed to archive email ${messageId}`, error);
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
@@ -99,9 +95,9 @@ The `StorageService` implements the `IStorageProvider` interface. All methods ar
|
||||
|
||||
Stores a file at the specified path. If a file already exists at that path, it will be overwritten.
|
||||
|
||||
- **`path: string`**: A unique identifier for the file, including its directory structure (e.g., `"user-123/emails/message-abc.eml"`).
|
||||
- **`content: Buffer | NodeJS.ReadableStream`**: The content of the file. It can be a `Buffer` for small files or a `ReadableStream` for large files to ensure memory efficiency.
|
||||
- **Returns**: `Promise<void>` - A promise that resolves when the file has been successfully stored.
|
||||
- **`path: string`**: A unique identifier for the file, including its directory structure (e.g., `"user-123/emails/message-abc.eml"`).
|
||||
- **`content: Buffer | NodeJS.ReadableStream`**: The content of the file. It can be a `Buffer` for small files or a `ReadableStream` for large files to ensure memory efficiency.
|
||||
- **Returns**: `Promise<void>` - A promise that resolves when the file has been successfully stored.
|
||||
|
||||
---
|
||||
|
||||
@@ -109,9 +105,9 @@ Stores a file at the specified path. If a file already exists at that path, it w
|
||||
|
||||
Retrieves a file from the specified path as a readable stream.
|
||||
|
||||
- **`path: string`**: The unique identifier of the file to retrieve.
|
||||
- **Returns**: `Promise<NodeJS.ReadableStream>` - A promise that resolves with a readable stream of the file's content.
|
||||
- **Throws**: An `Error` if the file is not found at the specified path.
|
||||
- **`path: string`**: The unique identifier of the file to retrieve.
|
||||
- **Returns**: `Promise<NodeJS.ReadableStream>` - A promise that resolves with a readable stream of the file's content.
|
||||
- **Throws**: An `Error` if the file is not found at the specified path.
|
||||
|
||||
---
|
||||
|
||||
@@ -119,8 +115,8 @@ Retrieves a file from the specified path as a readable stream.
|
||||
|
||||
Deletes a file from the storage backend.
|
||||
|
||||
- **`path: string`**: The unique identifier of the file to delete.
|
||||
- **Returns**: `Promise<void>` - A promise that resolves when the file is deleted. If the file does not exist, the promise will still resolve successfully without throwing an error.
|
||||
- **`path: string`**: The unique identifier of the file to delete.
|
||||
- **Returns**: `Promise<void>` - A promise that resolves when the file is deleted. If the file does not exist, the promise will still resolve successfully without throwing an error.
|
||||
|
||||
---
|
||||
|
||||
@@ -128,5 +124,5 @@ Deletes a file from the storage backend.
|
||||
|
||||
Checks for the existence of a file.
|
||||
|
||||
- **`path: string`**: The unique identifier of the file to check.
|
||||
- **Returns**: `Promise<boolean>` - A promise that resolves with `true` if the file exists, and `false` otherwise.
|
||||
- **`path: string`**: The unique identifier of the file to check.
|
||||
- **Returns**: `Promise<boolean>` - A promise that resolves with `true` if the file exists, and `false` otherwise.
|
||||
|
||||
36
docs/user-guides/email-providers/eml.md
Normal file
36
docs/user-guides/email-providers/eml.md
Normal file
@@ -0,0 +1,36 @@
|
||||
# EML Import
|
||||
|
||||
OpenArchiver allows you to import EML files from a zip archive. This is useful for importing emails from a variety of sources, including other email clients and services.
|
||||
|
||||
## Preparing the Zip File
|
||||
|
||||
To ensure a successful import, compress your `.eml` files into a single zip file according to the following guidelines:
|
||||
|
||||
- **Structure:** The zip file can contain any number of `.eml` files, organized in any folder structure. The folder structure will be preserved in OpenArchiver, so you can use it to organize your emails.
|
||||
- **Compression:** The zip file should be compressed using standard zip compression.
|
||||
|
||||
Here's an example of a valid folder structure:
|
||||
|
||||
```
|
||||
archive.zip
|
||||
├── inbox
|
||||
│ ├── email-01.eml
|
||||
│ └── email-02.eml
|
||||
├── sent
|
||||
│ └── email-03.eml
|
||||
└── drafts
|
||||
├── nested-folder
|
||||
│ └── email-04.eml
|
||||
└── email-05.eml
|
||||
```
|
||||
|
||||
## Creating an EML Ingestion Source
|
||||
|
||||
1. Go to the **Ingestion Sources** page in the OpenArchiver dashboard.
|
||||
2. Click the **Create New** button.
|
||||
3. Select **EML Import** as the provider.
|
||||
4. Enter a name for the ingestion source.
|
||||
5. Click the **Choose File** button and select the zip archive containing your EML files.
|
||||
6. Click the **Submit** button.
|
||||
|
||||
OpenArchiver will then start importing the EML files from the zip archive. The ingestion process may take some time, depending on the size of the archive.
|
||||
124
docs/user-guides/email-providers/google-workspace.md
Normal file
124
docs/user-guides/email-providers/google-workspace.md
Normal file
@@ -0,0 +1,124 @@
|
||||
# Connecting to Google Workspace
|
||||
|
||||
This guide provides instructions for Google Workspace administrators to set up a connection that allows the archiving of all user mailboxes within their organization.
|
||||
|
||||
The connection uses a **Google Cloud Service Account** with **Domain-Wide Delegation**. This is a secure method that grants the archiving service permission to access user data on behalf of the administrator, without requiring individual user passwords or consent.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- You must have **Super Administrator** privileges in your Google Workspace account.
|
||||
- You must have access to the **Google Cloud Console** associated with your organization.
|
||||
|
||||
## Setup Overview
|
||||
|
||||
The setup process involves three main parts:
|
||||
|
||||
1. Configuring the necessary permissions in the Google Cloud Console.
|
||||
2. Authorizing the service account in the Google Workspace Admin Console.
|
||||
3. Entering the generated credentials into the OpenArchiver application.
|
||||
|
||||
---
|
||||
|
||||
### Part 1: Google Cloud Console Setup
|
||||
|
||||
In this part, you will create a service account and enable the APIs it needs to function.
|
||||
|
||||
1. **Create a Google Cloud Project:**
|
||||
- Go to the [Google Cloud Console](https://console.cloud.google.com/).
|
||||
- If you don't already have one, create a new project for the archiving service (e.g., "Email Archiver").
|
||||
|
||||
2. **Enable Required APIs:**
|
||||
- In your selected project, navigate to the **"APIs & Services" > "Library"** section.
|
||||
- Search for and enable the following two APIs:
|
||||
- **Gmail API**
|
||||
- **Admin SDK API**
|
||||
|
||||
3. **Create a Service Account:**
|
||||
- Navigate to **"IAM & Admin" > "Service Accounts"**.
|
||||
- Click **"Create Service Account"**.
|
||||
- Give the service account a name (e.g., `email-archiver-service`) and a description.
|
||||
- Click **"Create and Continue"**. You do not need to grant this service account any roles on the project. Click **"Done"**.
|
||||
|
||||
4. **Generate a JSON Key:**
|
||||
- Find the service account you just created in the list.
|
||||
- Click the three-dot menu under **"Actions"** and select **"Manage keys"**.
|
||||
- Click **"Add Key"** > **"Create new key"**.
|
||||
- Select **JSON** as the key type and click **"Create"**.
|
||||
- A JSON file will be downloaded to your computer. **Keep this file secure, as it contains private credentials.** You will need the contents of this file in Part 3.
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
#### Error: "iam.disableServiceAccountKeyCreation"
|
||||
|
||||
If you receive an error message stating `The organization policy constraint 'iam.disableServiceAccountKeyCreation' is enforced` when trying to create a JSON key, it means your Google Cloud organization has a policy preventing the creation of new service account keys.
|
||||
|
||||
To resolve this, you must have **Organization Administrator** permissions.
|
||||
|
||||
1. **Navigate to your Organization:** In the Google Cloud Console, use the project selector at the top of the page to select your organization node (it usually has a building icon).
|
||||
2. **Go to IAM:** From the navigation menu, select **"IAM & Admin" > "IAM"**.
|
||||
3. **Edit Your Permissions:** Find your user account in the list and click the pencil icon to edit roles. Add the following two roles:
|
||||
- `Organization Policy Administrator`
|
||||
- `Organization Administrator`
|
||||
_Note: These roles are only available at the organization level, not the project level._
|
||||
4. **Modify the Policy:**
|
||||
- Navigate to **"IAM & Admin" > "Organization Policies"**.
|
||||
- In the filter box, search for the policy **"iam.disableServiceAccountKeyCreation"**.
|
||||
- Click on the policy to edit it.
|
||||
- You can either disable the policy entirely (if your security rules permit) or add a rule to exclude the specific project you are using for the archiver from this policy.
|
||||
5. **Retry Key Creation:** Once the policy is updated, return to your project and you should be able to generate the JSON key as described in Part 1.
|
||||
|
||||
---
|
||||
|
||||
### Part 2: Grant Domain-Wide Delegation
|
||||
|
||||
Now, you will authorize the service account you created to access data from your Google Workspace.
|
||||
|
||||
1. **Get the Service Account's Client ID:**
|
||||
- Go back to the list of service accounts in the Google Cloud Console.
|
||||
- Click on the service account you created.
|
||||
- Under the **"Details"** tab, find and copy the **Unique ID** (this is the Client ID).
|
||||
|
||||
2. **Authorize the Client in Google Workspace:**
|
||||
- Go to your **Google Workspace Admin Console** at [admin.google.com](https://admin.google.com).
|
||||
- Navigate to **Security > Access and data control > API controls**.
|
||||
- Under the "Domain-wide Delegation" section, click **"Manage Domain-wide Delegation"**.
|
||||
- Click **"Add new"**.
|
||||
|
||||
3. **Enter Client Details and Scopes:**
|
||||
- In the **Client ID** field, paste the **Unique ID** you copied from the service account.
|
||||
- In the **OAuth scopes** field, paste the following two scopes exactly as they appear, separated by a comma:
|
||||
```
|
||||
https://www.googleapis.com/auth/admin.directory.user.readonly,https://www.googleapis.com/auth/gmail.readonly
|
||||
```
|
||||
- Click **"Authorize"**.
|
||||
|
||||
The service account is now permitted to list users and read their email data across your domain.
|
||||
|
||||
---
|
||||
|
||||
### Part 3: Connecting in OpenArchiver
|
||||
|
||||
Finally, you will provide the generated credentials to the application.
|
||||
|
||||
1. **Navigate to Ingestion Sources:**
|
||||
From the main dashboard, go to the **Ingestion Sources** page.
|
||||
|
||||
2. **Create a New Source:**
|
||||
Click the **"Create New"** button.
|
||||
|
||||
3. **Fill in the Configuration Details:**
|
||||
- **Name:** Give the source a name (e.g., "Google Workspace Archive").
|
||||
- **Provider:** Select **"Google Workspace"** from the dropdown.
|
||||
- **Service Account Key (JSON):** Open the JSON file you downloaded in Part 1. Copy the entire content of the file and paste it into this text area.
|
||||
- **Impersonated Admin Email:** Enter the email address of a Super Administrator in your Google Workspace (e.g., `admin@your-domain.com`). The service will use this user's authority to discover all other users.
|
||||
|
||||
4. **Save Changes:**
|
||||
Click **"Save changes"**.
|
||||
|
||||
## What Happens Next?
|
||||
|
||||
Once the connection is saved and verified, the system will begin the archiving process:
|
||||
|
||||
1. **User Discovery:** The service will first connect to the Admin SDK to get a list of all active users in your Google Workspace.
|
||||
2. **Initial Import:** The system will then start a background job to import the mailboxes of all discovered users. The status will show as **"Importing"**. This can take a significant amount of time depending on the number of users and the size of their mailboxes.
|
||||
3. **Continuous Sync:** After the initial import is complete, the status will change to **"Active"**. The system will then periodically check each user's mailbox for new emails and archive them automatically.
|
||||
66
docs/user-guides/email-providers/imap.md
Normal file
66
docs/user-guides/email-providers/imap.md
Normal file
@@ -0,0 +1,66 @@
|
||||
# Connecting to a Generic IMAP Server
|
||||
|
||||
This guide will walk you through connecting a standard IMAP email account as an ingestion source. This allows you to archive emails from any provider that supports the IMAP protocol, which is common for many self-hosted or traditional email services.
|
||||
|
||||
## Step-by-Step Guide
|
||||
|
||||
1. **Navigate to Ingestion Sources:**
|
||||
From the main dashboard, go to the **Ingestions** page.
|
||||
|
||||
2. **Create a New Source:**
|
||||
Click the **"Create New"** button to open the ingestion source configuration dialog.
|
||||
|
||||
3. **Fill in the Configuration Details:**
|
||||
You will see a form with several fields. Here is how to fill them out for an IMAP connection:
|
||||
- **Name:** Give your ingestion source a descriptive name that you will easily recognize, such as "Work Email (IMAP)" or "Personal Gmail".
|
||||
|
||||
- **Provider:** From the dropdown menu, select **"Generic IMAP"**. This will reveal the specific fields required for an IMAP connection.
|
||||
|
||||
- **Host:** Enter the server address for your email provider's IMAP service. This often looks like `imap.your-provider.com` or `mail.your-domain.com`.
|
||||
|
||||
- **Port:** Enter the port number for the IMAP server. For a secure connection (which is strongly recommended), this is typically `993`.
|
||||
|
||||
- **Username:** Enter the full email address or username you use to log in to your email account.
|
||||
|
||||
- **Password:** Enter the password for your email account.
|
||||
|
||||
4. **Save Changes:**
|
||||
Once you have filled in all the details, click the **"Save changes"** button.
|
||||
|
||||
## Security Recommendation: Use an App Password
|
||||
|
||||
For enhanced security, we strongly recommend using an **"app password"** (sometimes called an "app-specific password") instead of your main account password.
|
||||
|
||||
Many email providers (like Gmail, Outlook, and Fastmail) allow you to generate a unique password that grants access only to a specific application (in this case, the archiving service). If you ever need to revoke access, you can simply delete the app password without affecting your main account login.
|
||||
|
||||
Please consult your email provider's documentation to see if they support app passwords and how to create one.
|
||||
|
||||
### How to Obtain an App Password for Gmail
|
||||
|
||||
1. **Enable 2-Step Verification:** You must have 2-Step Verification turned on for your Google Account.
|
||||
2. **Go to App Passwords:** Visit [myaccount.google.com/apppasswords](https://myaccount.google.com/apppasswords). You may be asked to sign in again.
|
||||
3. **Create the Password:**
|
||||
- At the bottom, click **"Select app"** and choose **"Other (Custom name)"**.
|
||||
- Give it a name you'll recognize, like "OpenArchiver".
|
||||
- Click **"Generate"**.
|
||||
4. **Use the Password:** A 16-digit password will be displayed. Copy this password and paste it into the **Password** field in the OpenArchiver ingestion source form.
|
||||
|
||||
### How to Obtain an App Password for Outlook/Microsoft Accounts
|
||||
|
||||
1. **Enable Two-Step Verification:** You must have two-step verification enabled for your Microsoft account.
|
||||
2. **Go to Security Options:** Sign in to your Microsoft account and navigate to the [Advanced security options](https://account.live.com/proofs/manage/additional).
|
||||
3. **Create a New App Password:**
|
||||
- Scroll down to the **"App passwords"** section.
|
||||
- Click **"Create a new app password"**.
|
||||
4. **Use the Password:** A new password will be generated. Use this password in the **Password** field in the OpenArchiver ingestion source form.
|
||||
|
||||
## What Happens Next?
|
||||
|
||||
After you save the connection, the system will attempt to connect to the IMAP server. The status of the ingestion source will update to reflect its current state:
|
||||
|
||||
- **Importing:** The system is performing the initial, one-time import of all emails from your `INBOX`. This may take a while depending on the size of your mailbox.
|
||||
- **Active:** The initial import is complete, and the system will now periodically check for and archive new emails.
|
||||
- **Paused:** The connection is valid, but the system will not check for new emails until you resume it.
|
||||
- **Error:** The system was unable to connect using the provided credentials. Please double-check your Host, Port, Username, and Password and try again.
|
||||
|
||||
You can view, edit, pause, or manually sync any of your ingestion sources from the main table on the **Ingestions** page.
|
||||
11
docs/user-guides/email-providers/index.md
Normal file
11
docs/user-guides/email-providers/index.md
Normal file
@@ -0,0 +1,11 @@
|
||||
# Connecting Email Providers
|
||||
|
||||
Open Archiver can connect to a variety of email sources to ingest and archive your emails. This section provides guides for connecting to popular email providers.
|
||||
|
||||
Choose your provider from the list below to get started:
|
||||
|
||||
- [Google Workspace](./google-workspace.md)
|
||||
- [Microsoft 365](./microsoft-365.md)
|
||||
- [Generic IMAP Server](./imap.md)
|
||||
- [EML Import](./eml.md)
|
||||
- [PST Import](./pst.md)
|
||||
93
docs/user-guides/email-providers/microsoft-365.md
Normal file
93
docs/user-guides/email-providers/microsoft-365.md
Normal file
@@ -0,0 +1,93 @@
|
||||
# Connecting to Microsoft 365
|
||||
|
||||
This guide provides instructions for Microsoft 365 administrators to set up a connection that allows the archiving of all user mailboxes within their organization.
|
||||
|
||||
The connection uses the **Microsoft Graph API** and an **App Registration** in Microsoft Entra ID. This is a secure, standard method that grants the archiving service permission to read email data on your behalf without ever needing to handle user passwords.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- You must have one of the following administrator roles in your Microsoft 365 tenant: **Global Administrator**, **Application Administrator**, or **Cloud Application Administrator**.
|
||||
|
||||
## Setup Overview
|
||||
|
||||
The setup process involves four main parts, all performed within the Microsoft Entra admin center and the OpenArchiver application:
|
||||
|
||||
1. Registering a new application identity for the archiver in Entra ID.
|
||||
2. Granting the application the specific permissions it needs to read mail.
|
||||
3. Creating a secure password (a client secret) for the application.
|
||||
4. Entering the generated credentials into the OpenArchiver application.
|
||||
|
||||
---
|
||||
|
||||
### Part 1: Register a New Application in Microsoft Entra ID
|
||||
|
||||
First, you will create an "App registration," which acts as an identity for the archiving service within your Microsoft 365 ecosystem.
|
||||
|
||||
1. Sign in to the [Microsoft Entra admin center](https://entra.microsoft.com).
|
||||
2. In the left-hand navigation pane, go to **Identity > Applications > App registrations**.
|
||||
3. Click the **+ New registration** button at the top of the page.
|
||||
4. On the "Register an application" screen:
|
||||
- **Name:** Give the application a descriptive name you will recognize, such as `OpenArchiver Service`.
|
||||
- **Supported account types:** Select **"Accounts in this organizational directory only (Default Directory only - Single tenant)"**. This is the most secure option.
|
||||
- **Redirect URI (optional):** You can leave this blank.
|
||||
5. Click the **Register** button. You will be taken to the application's main "Overview" page.
|
||||
|
||||
---
|
||||
|
||||
### Part 2: Grant API Permissions
|
||||
|
||||
Next, you must grant the application the specific permissions required to read user profiles and their mailboxes.
|
||||
|
||||
1. From your new application's page, select **API permissions** from the left-hand menu.
|
||||
2. Click the **+ Add a permission** button.
|
||||
3. In the "Request API permissions" pane, select **Microsoft Graph**.
|
||||
4. Select **Application permissions**. This is critical as it allows the service to run in the background without a user being signed in.
|
||||
5. In the "Select permissions" search box, find and check the boxes for the following two permissions:
|
||||
- `Mail.Read`
|
||||
- `User.Read.All`
|
||||
6. Click the **Add permissions** button at the bottom.
|
||||
7. **Crucial Final Step:** You will now see the permissions in your list with a warning status. You must grant consent on behalf of your organization. Click the **"Grant admin consent for [Your Organization's Name]"** button located above the permissions table. Click **Yes** in the confirmation dialog. The status for both permissions should now show a green checkmark.
|
||||
|
||||
---
|
||||
|
||||
### Part 3: Create a Client Secret
|
||||
|
||||
The client secret is a password that the archiving service will use to authenticate. Treat this with the same level of security as an administrator's password.
|
||||
|
||||
1. In your application's menu, navigate to **Certificates & secrets**.
|
||||
2. Select the **Client secrets** tab and click **+ New client secret**.
|
||||
3. In the pane that appears:
|
||||
- **Description:** Enter a clear description, such as `OpenArchiver Key`.
|
||||
- **Expires:** Select an expiry duration. We recommend **12 or 24 months**. Set a calendar reminder to renew it before it expires to prevent service interruption.
|
||||
4. Click **Add**.
|
||||
5. **IMMEDIATELY COPY THE SECRET:** The secret is now visible in the **"Value"** column. This is the only time it will be fully displayed. Copy this value now and store it in a secure password manager before navigating away. If you lose it, you must create a new one.
|
||||
|
||||
---
|
||||
|
||||
### Part 4: Connecting in OpenArchiver
|
||||
|
||||
You now have the three pieces of information required to configure the connection.
|
||||
|
||||
1. **Navigate to Ingestion Sources:**
|
||||
In the OpenArchiver application, go to the **Ingestion Sources** page.
|
||||
|
||||
2. **Create a New Source:**
|
||||
Click the **"Create New"** button.
|
||||
|
||||
3. **Fill in the Configuration Details:**
|
||||
- **Name:** Give the source a name (e.g., "Microsoft 365 Archive").
|
||||
- **Provider:** Select **"Microsoft 365"** from the dropdown.
|
||||
- **Application (Client) ID:** Go to the **Overview** page of your app registration in the Entra admin center and copy this value.
|
||||
- **Directory (Tenant) ID:** This value is also on the **Overview** page.
|
||||
- **Client Secret Value:** Paste the secret **Value** (not the Secret ID) that you copied and saved in the previous step.
|
||||
|
||||
4. **Save Changes:**
|
||||
Click **"Save changes"**.
|
||||
|
||||
## What Happens Next?
|
||||
|
||||
Once the connection is saved, the system will begin the archiving process:
|
||||
|
||||
1. **User Discovery:** The service will connect to the Microsoft Graph API to get a list of all users in your organization.
|
||||
2. **Initial Import:** The system will begin a background job to import the mailboxes of all discovered users, folder by folder. The status will show as **"Importing"**. This can take a significant amount of time.
|
||||
3. **Continuous Sync:** After the initial import, the status will change to **"Active"**. The system will use Microsoft Graph's delta query feature to efficiently fetch only new or changed emails, ensuring the archive stays up-to-date.
|
||||
21
docs/user-guides/email-providers/pst.md
Normal file
21
docs/user-guides/email-providers/pst.md
Normal file
@@ -0,0 +1,21 @@
|
||||
# PST Import
|
||||
|
||||
OpenArchiver allows you to import PST files. This is useful for importing emails from a variety of sources, including Microsoft Outlook.
|
||||
|
||||
## Preparing the PST File
|
||||
|
||||
To ensure a successful import, you should prepare your PST file according to the following guidelines:
|
||||
|
||||
- **Structure:** The PST file can contain any number of emails, organized in any folder structure. The folder structure will be preserved in OpenArchiver, so you can use it to organize your emails.
|
||||
- **Password Protection:** OpenArchiver does not support password-protected PST files. Please remove the password from your PST file before importing it.
|
||||
|
||||
## Creating a PST Ingestion Source
|
||||
|
||||
1. Go to the **Ingestion Sources** page in the OpenArchiver dashboard.
|
||||
2. Click the **Create New** button.
|
||||
3. Select **PST Import** as the provider.
|
||||
4. Enter a name for the ingestion source.
|
||||
5. Click the **Choose File** button and select the PST file.
|
||||
6. Click the **Submit** button.
|
||||
|
||||
OpenArchiver will then start importing the emails from the PST file. The ingestion process may take some time, depending on the size of the file.
|
||||
329
docs/user-guides/installation.md
Normal file
329
docs/user-guides/installation.md
Normal file
@@ -0,0 +1,329 @@
|
||||
# Installation Guide
|
||||
|
||||
This guide will walk you through setting up Open Archiver using Docker Compose. This is the recommended method for deploying the application.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/install/) installed on your server or local machine.
|
||||
- A server or local machine with at least 4GB of RAM (2GB of RAM if you use external Postgres, Redis (Valkey) and Meilisearch instances).
|
||||
- [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) installed on your server or local machine.
|
||||
|
||||
## 1. Clone the Repository
|
||||
|
||||
First, clone the Open Archiver repository to your machine:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/LogicLabs-OU/OpenArchiver.git
|
||||
cd OpenArchiver
|
||||
```
|
||||
|
||||
## 2. Configure Your Environment
|
||||
|
||||
The application is configured using environment variables. You'll need to create a `.env` file to store your configuration.
|
||||
|
||||
Copy the example environment file for Docker:
|
||||
|
||||
```bash
|
||||
cp .env.example.docker .env
|
||||
```
|
||||
|
||||
Now, open the `.env` file in a text editor and customize the settings.
|
||||
|
||||
### Important Configuration
|
||||
|
||||
You must change the following placeholder values to secure your instance:
|
||||
|
||||
- `POSTGRES_PASSWORD`: A strong, unique password for the database.
|
||||
- `REDIS_PASSWORD`: A strong, unique password for the Valkey/Redis service.
|
||||
- `MEILI_MASTER_KEY`: A complex key for Meilisearch.
|
||||
- `JWT_SECRET`: A long, random string for signing authentication tokens.
|
||||
- `ENCRYPTION_KEY`: A 32-byte hex string for encrypting sensitive data in the database. You can generate one with the following command:
|
||||
```bash
|
||||
openssl rand -hex 32
|
||||
```
|
||||
|
||||
### Storage Configuration
|
||||
|
||||
By default, the Docker Compose setup uses local filesystem storage, which is persisted using a Docker volume named `archiver-data`. This is suitable for most use cases.
|
||||
|
||||
If you want to use S3-compatible object storage, change the `STORAGE_TYPE` to `s3` and fill in your S3 credentials (`STORAGE_S3_*` variables). When `STORAGE_TYPE` is set to `local`, the S3-related variables are not required.
|
||||
|
||||
### Using External Services
|
||||
|
||||
For convenience, the `docker-compose.yml` file includes services for PostgreSQL, Valkey (Redis), and Meilisearch. However, you can use your own external or managed instances for these services.
|
||||
|
||||
To do so:
|
||||
|
||||
1. **Update your `.env` file**: Change the host, port, and credential variables to point to your external service instances. For example, you would update `DATABASE_URL`, `REDIS_HOST`, and `MEILI_HOST`.
|
||||
2. **Modify `docker-compose.yml`**: Remove or comment out the service definitions for `postgres`, `valkey`, and `meilisearch` from your `docker-compose.yml` file.
|
||||
|
||||
This will configure the Open Archiver application to connect to your services instead of starting the default ones.
|
||||
|
||||
### Environment Variable Reference
|
||||
|
||||
Here is a complete list of environment variables available for configuration:
|
||||
|
||||
#### Application Settings
|
||||
|
||||
| Variable | Description | Default Value |
|
||||
| ---------------- | ----------------------------------------------------------------------------------------------------- | ------------- |
|
||||
| `NODE_ENV` | The application environment. | `development` |
|
||||
| `PORT_BACKEND` | The port for the backend service. | `4000` |
|
||||
| `PORT_FRONTEND` | The port for the frontend service. | `3000` |
|
||||
| `SYNC_FREQUENCY` | The frequency of continuous email syncing. See [cron syntax](https://crontab.guru/) for more details. | `* * * * *` |
|
||||
|
||||
#### Docker Compose Service Configuration
|
||||
|
||||
These variables are used by `docker-compose.yml` to configure the services.
|
||||
|
||||
| Variable | Description | Default Value |
|
||||
| ------------------- | ----------------------------------------------- | -------------------------------------------------------- |
|
||||
| `POSTGRES_DB` | The name of the PostgreSQL database. | `open_archive` |
|
||||
| `POSTGRES_USER` | The username for the PostgreSQL database. | `admin` |
|
||||
| `POSTGRES_PASSWORD` | The password for the PostgreSQL database. | `password` |
|
||||
| `DATABASE_URL` | The connection URL for the PostgreSQL database. | `postgresql://admin:password@postgres:5432/open_archive` |
|
||||
| `MEILI_MASTER_KEY` | The master key for Meilisearch. | `aSampleMasterKey` |
|
||||
| `MEILI_HOST` | The host for the Meilisearch service. | `http://meilisearch:7700` |
|
||||
| `REDIS_HOST` | The host for the Valkey (Redis) service. | `valkey` |
|
||||
| `REDIS_PORT` | The port for the Valkey (Redis) service. | `6379` |
|
||||
| `REDIS_PASSWORD` | The password for the Valkey (Redis) service. | `defaultredispassword` |
|
||||
| `REDIS_TLS_ENABLED` | Enable or disable TLS for Redis. | `false` |
|
||||
|
||||
#### Storage Settings
|
||||
|
||||
| Variable | Description | Default Value |
|
||||
| ------------------------------ | ----------------------------------------------------------------------------------------------------------- | ------------------------- |
|
||||
| `STORAGE_TYPE` | The storage backend to use (`local` or `s3`). | `local` |
|
||||
| `BODY_SIZE_LIMIT` | The maximum request body size for uploads. Can be a number in bytes or a string with a unit (e.g., `100M`). | `100M` |
|
||||
| `STORAGE_LOCAL_ROOT_PATH` | The root path for local file storage. | `/var/data/open-archiver` |
|
||||
| `STORAGE_S3_ENDPOINT` | The endpoint for S3-compatible storage (required if `STORAGE_TYPE` is `s3`). | |
|
||||
| `STORAGE_S3_BUCKET` | The bucket name for S3-compatible storage (required if `STORAGE_TYPE` is `s3`). | |
|
||||
| `STORAGE_S3_ACCESS_KEY_ID` | The access key ID for S3-compatible storage (required if `STORAGE_TYPE` is `s3`). | |
|
||||
| `STORAGE_S3_SECRET_ACCESS_KEY` | The secret access key for S3-compatible storage (required if `STORAGE_TYPE` is `s3`). | |
|
||||
| `STORAGE_S3_REGION` | The region for S3-compatible storage (required if `STORAGE_TYPE` is `s3`). | |
|
||||
| `STORAGE_S3_FORCE_PATH_STYLE` | Force path-style addressing for S3 (optional). | `false` |
|
||||
|
||||
#### Security & Authentication
|
||||
|
||||
| Variable | Description | Default Value |
|
||||
| -------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------ |
|
||||
| `JWT_SECRET` | A secret key for signing JWT tokens. | `a-very-secret-key-that-you-should-change` |
|
||||
| `JWT_EXPIRES_IN` | The expiration time for JWT tokens. | `7d` |
|
||||
| ~~`SUPER_API_KEY`~~ (Deprecated) | An API key with super admin privileges. (The SUPER_API_KEY is deprecated since v0.3.0 after we roll out the role-based access control system.) | |
|
||||
| `RATE_LIMIT_WINDOW_MS` | The window in milliseconds for which API requests are checked. | `900000` (15 minutes) |
|
||||
| `RATE_LIMIT_MAX_REQUESTS` | The maximum number of API requests allowed from an IP within the window. | `100` |
|
||||
| `ENCRYPTION_KEY` | A 32-byte hex string for encrypting sensitive data in the database. | |
|
||||
|
||||
## 3. Run the Application
|
||||
|
||||
Once you have configured your `.env` file, you can start all the services using Docker Compose:
|
||||
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
This command will:
|
||||
|
||||
- Pull the required Docker images for the frontend, backend, database, and other services.
|
||||
- Create and start the containers in the background (`-d` flag).
|
||||
- Create the persistent volumes for your data.
|
||||
|
||||
You can check the status of the running containers with:
|
||||
|
||||
```bash
|
||||
docker compose ps
|
||||
```
|
||||
|
||||
## 4. Access the Application
|
||||
|
||||
Once the services are running, you can access the Open Archiver web interface by navigating to `http://localhost:3000` in your web browser.
|
||||
|
||||
You can log in with the `ADMIN_EMAIL` and `ADMIN_PASSWORD` you configured in your `.env` file.
|
||||
|
||||
## 5. Next Steps
|
||||
|
||||
After successfully deploying and logging into Open Archiver, the next step is to configure your ingestion sources to start archiving emails.
|
||||
|
||||
- [Connecting to Google Workspace](./email-providers/google-workspace.md)
|
||||
- [Connecting to Microsoft 365](./email-providers/microsoft-365.md)
|
||||
- [Connecting to a Generic IMAP Server](./email-providers/imap.md)
|
||||
|
||||
## Updating Your Installation
|
||||
|
||||
To update your Open Archiver instance to the latest version, run the following commands:
|
||||
|
||||
```bash
|
||||
# Pull the latest changes from the repository
|
||||
git pull
|
||||
|
||||
# Pull the latest Docker images
|
||||
docker compose pull
|
||||
|
||||
# Restart the services with the new images
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
## Deploying on Coolify
|
||||
|
||||
If you are deploying Open Archiver on [Coolify](https://coolify.io/), it is recommended to let Coolify manage the Docker networks for you. This can help avoid potential routing conflicts and simplify your setup.
|
||||
|
||||
To do this, you will need to make a small modification to your `docker-compose.yml` file.
|
||||
|
||||
### Modify `docker-compose.yml` for Coolify
|
||||
|
||||
1. **Open your `docker-compose.yml` file** in a text editor.
|
||||
|
||||
2. **Remove all `networks` sections** from the file. This includes the network configuration for each service and the top-level network definition.
|
||||
|
||||
Specifically, you need to remove:
|
||||
- The `networks: - open-archiver-net` lines from the `open-archiver`, `postgres`, `valkey`, and `meilisearch` services.
|
||||
- The entire `networks:` block at the end of the file.
|
||||
|
||||
Here is an example of what to remove from a service:
|
||||
|
||||
```diff
|
||||
services:
|
||||
open-archiver:
|
||||
image: logiclabshq/open-archiver:latest
|
||||
# ... other settings
|
||||
- networks:
|
||||
- - open-archiver-net
|
||||
```
|
||||
|
||||
And remove this entire block from the end of the file:
|
||||
|
||||
```diff
|
||||
- networks:
|
||||
- open-archiver-net:
|
||||
- driver: bridge
|
||||
```
|
||||
|
||||
3. **Save the modified `docker-compose.yml` file.**
|
||||
|
||||
By removing these sections, you allow Coolify to automatically create and manage the necessary networks, ensuring that all services can communicate with each other and are correctly exposed through Coolify's reverse proxy.
|
||||
|
||||
After making these changes, you can proceed with deploying your application on Coolify as you normally would.
|
||||
|
||||
## Where is my data stored (When using local storage and Docker)?
|
||||
|
||||
If you are using local storage to store your emails, based on your `docker-compose.yml` file, your data is being stored in what's called a "named volume" (`archiver-data`). That's why you're not seeing the files in the `./data/open-archiver` directory you created.
|
||||
|
||||
1. **List all Docker volumes**:
|
||||
|
||||
Run this command to see all the volumes on your system:
|
||||
|
||||
```bash
|
||||
docker volume ls
|
||||
```
|
||||
|
||||
2. **Identify the correct volume**:
|
||||
|
||||
Look through the list for a volume name that ends with `_archiver-data`. The part before that will be your project's directory name. For example, if your project is in a folder named `OpenArchiver`, the volume will be `openarchiver_archiver-data`, but it can also be a randomly generated hash.
|
||||
|
||||
3. **Inspect the correct volume**:
|
||||
|
||||
Once you've identified the correct volume name, use it in the `inspect` command. For example:
|
||||
|
||||
```bash
|
||||
docker volume inspect <your_volume_name_here>
|
||||
```
|
||||
|
||||
This will give you the correct `Mountpoint` path where your data is being stored. It will look something like this (the exact path will vary depending on your system):
|
||||
|
||||
```json
|
||||
{
|
||||
"CreatedAt": "2025-07-25T11:22:19Z",
|
||||
"Driver": "local",
|
||||
"Labels": {
|
||||
"com.docker.compose.config-hash": "---",
|
||||
"com.docker.compose.project": "---",
|
||||
"com.docker.compose.version": "2.38.2",
|
||||
"com.docker.compose.volume": "us8wwos0o4ok4go4gc8cog84_archiver-data"
|
||||
},
|
||||
"Mountpoint": "/var/lib/docker/volumes/us8wwos0o4ok4go4gc8cog84_archiver-data/_data",
|
||||
"Name": "us8wwos0o4ok4go4gc8cog84_archiver-data",
|
||||
"Options": null,
|
||||
"Scope": "local"
|
||||
}
|
||||
```
|
||||
|
||||
In this example, the data is located at `/var/lib/docker/volumes/us8wwos0o4ok4go4gc8cog84_archiver-data/_data`. You can then `cd` into that directory to see your files.
|
||||
|
||||
### To save data to a specific folder
|
||||
|
||||
To save the data to a specific folder on your machine, you'll need to make a change to your `docker-compose.yml`. You need to switch from a named volume to a "bind mount".
|
||||
|
||||
Here’s how you can do it:
|
||||
|
||||
1. **Edit `docker-compose.yml`**:
|
||||
|
||||
Open the `docker-compose.yml` file and find the `open-archiver` service. You're going to change the `volumes` section.
|
||||
|
||||
**Change this:**
|
||||
|
||||
```yaml
|
||||
services:
|
||||
open-archiver:
|
||||
# ... other config
|
||||
volumes:
|
||||
- archiver-data:/var/data/open-archiver
|
||||
```
|
||||
|
||||
**To this:**
|
||||
|
||||
```yaml
|
||||
services:
|
||||
open-archiver:
|
||||
# ... other config
|
||||
volumes:
|
||||
- ./data/open-archiver:/var/data/open-archiver
|
||||
```
|
||||
|
||||
You'll also want to remove the `archiver-data` volume definition at the bottom of the file, since it's no longer needed.
|
||||
|
||||
**Remove this whole block:**
|
||||
|
||||
```yaml
|
||||
volumes:
|
||||
# ... other volumes
|
||||
archiver-data:
|
||||
driver: local
|
||||
```
|
||||
|
||||
2. **Restart your containers**:
|
||||
|
||||
After you've saved the changes, run the following command in your terminal to apply them. The `--force-recreate` flag will ensure the container is recreated with the new volume settings.
|
||||
|
||||
```bash
|
||||
docker-compose up -d --force-recreate
|
||||
```
|
||||
|
||||
After this, any new data will be saved directly into the `./data/open-archiver` folder in your project directory.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### 403 Cross-Site POST Forbidden Error
|
||||
|
||||
If you are running the application behind a reverse proxy or have mapped the application to a different port (e.g., `3005:3000`), you may encounter a `403 Cross-site POST form submissions are forbidden` error when uploading files.
|
||||
|
||||
To resolve this, you must set the `ORIGIN` environment variable to the URL of your application. This ensures that the backend can verify the origin of requests and prevent cross-site request forgery (CSRF) attacks.
|
||||
|
||||
Add the following line to your `.env` file, replacing `<your_host>` and `<your_port>` with your specific values:
|
||||
|
||||
```bash
|
||||
ORIGIN=http://<your_host>:<your_port>
|
||||
```
|
||||
|
||||
For example, if your application is accessible at `http://localhost:3005`, you would set the variable as follows:
|
||||
|
||||
```bash
|
||||
ORIGIN=http://localhost:3005
|
||||
```
|
||||
|
||||
After adding the `ORIGIN` variable, restart your Docker containers for the changes to take effect:
|
||||
|
||||
```bash
|
||||
docker-compose up -d --force-recreate
|
||||
```
|
||||
|
||||
This will ensure that your file uploads are correctly authorized.
|
||||
32
docs/user-guides/settings/system.md
Normal file
32
docs/user-guides/settings/system.md
Normal file
@@ -0,0 +1,32 @@
|
||||
# System Settings
|
||||
|
||||
System settings allow administrators to configure the global look and theme of the application. These settings apply to all users.
|
||||
|
||||
## Configuration
|
||||
|
||||
### Language
|
||||
|
||||
This setting determines the default display language for the application UI. The selected language will be used for all interface elements, including menus, labels, and messages.
|
||||
|
||||
> **Important:** When the language is changed, the backend (API) language will only change after a restart of the server. The frontend will update immediately.
|
||||
|
||||
Supported languages:
|
||||
|
||||
- English
|
||||
- German
|
||||
- French
|
||||
- Estonian
|
||||
- Spanish
|
||||
- Italian
|
||||
- Portuguese
|
||||
- Dutch
|
||||
- Greek
|
||||
- Japanese
|
||||
|
||||
### Default Theme
|
||||
|
||||
This setting controls the default color theme for the application. Users can choose between light, dark, or system default. The system default theme will sync with the user's operating system theme.
|
||||
|
||||
### Support Email
|
||||
|
||||
This setting allows administrators to provide a public-facing email address for user support inquiries. This email address may be displayed on error pages or in other areas where users may need to contact support.
|
||||
70
package.json
70
package.json
@@ -1,32 +1,42 @@
|
||||
{
|
||||
"name": "open-archiver",
|
||||
"private": true,
|
||||
"scripts": {
|
||||
"dev": "dotenv -- pnpm --filter \"./packages/*\" --parallel dev",
|
||||
"build": "pnpm --filter \"./packages/*\" --parallel build",
|
||||
"start": "dotenv -- pnpm --filter \"./packages/*\" --parallel start",
|
||||
"start:workers": "dotenv -- concurrently \"pnpm --filter @open-archiver/backend start:ingestion-worker\" \"pnpm --filter @open-archiver/backend start:indexing-worker\" \"pnpm --filter @open-archiver/backend start:sync-scheduler\"",
|
||||
"start:workers:dev": "dotenv -- concurrently \"pnpm --filter @open-archiver/backend start:ingestion-worker:dev\" \"pnpm --filter @open-archiver/backend start:indexing-worker:dev\" \"pnpm --filter @open-archiver/backend start:sync-scheduler:dev\"",
|
||||
"db:generate": "dotenv -- pnpm --filter @open-archiver/backend db:generate",
|
||||
"db:migrate": "dotenv -- pnpm --filter @open-archiver/backend db:migrate",
|
||||
"db:migrate:dev": "dotenv -- pnpm --filter @open-archiver/backend db:migrate:dev",
|
||||
"docker-start": "pnpm db:migrate && concurrently \"pnpm start:workers\" \"pnpm start\""
|
||||
},
|
||||
"dependencies": {
|
||||
"concurrently": "^9.2.0",
|
||||
"dotenv-cli": "8.0.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"typescript": "5.8.3"
|
||||
},
|
||||
"packageManager": "pnpm@10.13.1",
|
||||
"engines": {
|
||||
"node": ">=22.0.0",
|
||||
"pnpm": "10.13.1"
|
||||
},
|
||||
"pnpm": {
|
||||
"onlyBuiltDependencies": [
|
||||
"esbuild"
|
||||
]
|
||||
}
|
||||
"name": "open-archiver",
|
||||
"version": "0.3.1",
|
||||
"private": true,
|
||||
"scripts": {
|
||||
"dev": "dotenv -- pnpm --filter \"./packages/*\" --parallel dev",
|
||||
"build": "pnpm --filter \"./packages/*\" build",
|
||||
"start": "dotenv -- pnpm --filter \"./packages/*\" --parallel start",
|
||||
"start:workers": "dotenv -- concurrently \"pnpm --filter @open-archiver/backend start:ingestion-worker\" \"pnpm --filter @open-archiver/backend start:indexing-worker\" \"pnpm --filter @open-archiver/backend start:sync-scheduler\"",
|
||||
"start:workers:dev": "dotenv -- concurrently \"pnpm --filter @open-archiver/backend start:ingestion-worker:dev\" \"pnpm --filter @open-archiver/backend start:indexing-worker:dev\" \"pnpm --filter @open-archiver/backend start:sync-scheduler:dev\"",
|
||||
"db:generate": "dotenv -- pnpm --filter @open-archiver/backend db:generate",
|
||||
"db:migrate": "dotenv -- pnpm --filter @open-archiver/backend db:migrate",
|
||||
"db:migrate:dev": "dotenv -- pnpm --filter @open-archiver/backend db:migrate:dev",
|
||||
"docker-start": "concurrently \"pnpm start:workers\" \"pnpm start\"",
|
||||
"docs:dev": "vitepress dev docs --port 3009",
|
||||
"docs:build": "vitepress build docs",
|
||||
"docs:preview": "vitepress preview docs",
|
||||
"format": "prettier --write .",
|
||||
"lint": "prettier --check ."
|
||||
},
|
||||
"dependencies": {
|
||||
"concurrently": "^9.2.0",
|
||||
"dotenv-cli": "8.0.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"prettier": "^3.6.2",
|
||||
"prettier-plugin-svelte": "^3.4.0",
|
||||
"prettier-plugin-tailwindcss": "^0.6.14",
|
||||
"typescript": "5.8.3",
|
||||
"vitepress": "^1.6.4"
|
||||
},
|
||||
"packageManager": "pnpm@10.13.1",
|
||||
"engines": {
|
||||
"node": ">=22.0.0",
|
||||
"pnpm": "10.13.1"
|
||||
},
|
||||
"pnpm": {
|
||||
"onlyBuiltDependencies": [
|
||||
"esbuild"
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,16 +4,16 @@ import { config } from 'dotenv';
|
||||
config();
|
||||
|
||||
if (!process.env.DATABASE_URL) {
|
||||
throw new Error('DATABASE_URL is not set in the .env file');
|
||||
throw new Error('DATABASE_URL is not set in the .env file');
|
||||
}
|
||||
|
||||
export default defineConfig({
|
||||
schema: './src/database/schema.ts',
|
||||
out: './src/database/migrations',
|
||||
dialect: 'postgresql',
|
||||
dbCredentials: {
|
||||
url: process.env.DATABASE_URL,
|
||||
},
|
||||
verbose: true,
|
||||
strict: true,
|
||||
schema: './src/database/schema.ts',
|
||||
out: './src/database/migrations',
|
||||
dialect: 'postgresql',
|
||||
dbCredentials: {
|
||||
url: process.env.DATABASE_URL,
|
||||
},
|
||||
verbose: true,
|
||||
strict: true,
|
||||
});
|
||||
|
||||
@@ -1,65 +1,80 @@
|
||||
{
|
||||
"name": "@open-archiver/backend",
|
||||
"version": "0.1.0",
|
||||
"private": true,
|
||||
"main": "dist/index.js",
|
||||
"scripts": {
|
||||
"dev": "ts-node-dev --respawn --transpile-only src/index.ts ",
|
||||
"build": "tsc",
|
||||
"start": "node dist/index.js",
|
||||
"start:ingestion-worker": "node dist/workers/ingestion.worker.js",
|
||||
"start:indexing-worker": "node dist/workers/indexing.worker.js",
|
||||
"start:sync-scheduler": "node dist/jobs/schedulers/sync-scheduler.js",
|
||||
"start:ingestion-worker:dev": "ts-node-dev --respawn --transpile-only src/workers/ingestion.worker.ts",
|
||||
"start:indexing-worker:dev": "ts-node-dev --respawn --transpile-only src/workers/indexing.worker.ts",
|
||||
"start:sync-scheduler:dev": "ts-node-dev --respawn --transpile-only src/jobs/schedulers/sync-scheduler.ts",
|
||||
"db:generate": "drizzle-kit generate --config=drizzle.config.ts",
|
||||
"db:push": "drizzle-kit push --config=drizzle.config.ts",
|
||||
"db:migrate": "node dist/database/migrate.js",
|
||||
"db:migrate:dev": "ts-node-dev src/database/migrate.ts"
|
||||
},
|
||||
"dependencies": {
|
||||
"drizzle-kit": "^0.31.4",
|
||||
"@aws-sdk/client-s3": "^3.844.0",
|
||||
"@aws-sdk/lib-storage": "^3.844.0",
|
||||
"@azure/msal-node": "^3.6.3",
|
||||
"@microsoft/microsoft-graph-client": "^3.0.7",
|
||||
"@open-archiver/types": "workspace:*",
|
||||
"axios": "^1.10.0",
|
||||
"bcryptjs": "^3.0.2",
|
||||
"bullmq": "^5.56.3",
|
||||
"cross-fetch": "^4.1.0",
|
||||
"deepmerge-ts": "^7.1.5",
|
||||
"dotenv": "^17.2.0",
|
||||
"drizzle-orm": "^0.44.2",
|
||||
"express": "^5.1.0",
|
||||
"express-validator": "^7.2.1",
|
||||
"google-auth-library": "^10.1.0",
|
||||
"googleapis": "^152.0.0",
|
||||
"imapflow": "^1.0.191",
|
||||
"jose": "^6.0.11",
|
||||
"mailparser": "^3.7.4",
|
||||
"mammoth": "^1.9.1",
|
||||
"meilisearch": "^0.51.0",
|
||||
"pdf2json": "^3.1.6",
|
||||
"pg": "^8.16.3",
|
||||
"pino": "^9.7.0",
|
||||
"pino-pretty": "^13.0.0",
|
||||
"postgres": "^3.4.7",
|
||||
"reflect-metadata": "^0.2.2",
|
||||
"sqlite3": "^5.1.7",
|
||||
"tsconfig-paths": "^4.2.0",
|
||||
"xlsx": "^0.18.5"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@bull-board/api": "^6.11.0",
|
||||
"@bull-board/express": "^6.11.0",
|
||||
"@types/express": "^5.0.3",
|
||||
"@types/mailparser": "^3.4.6",
|
||||
"@types/microsoft-graph": "^2.40.1",
|
||||
"@types/node": "^24.0.12",
|
||||
"bull-board": "^2.1.3",
|
||||
"ts-node-dev": "^2.0.0",
|
||||
"typescript": "^5.8.3"
|
||||
}
|
||||
"name": "@open-archiver/backend",
|
||||
"version": "0.1.0",
|
||||
"private": true,
|
||||
"main": "dist/index.js",
|
||||
"scripts": {
|
||||
"dev": "ts-node-dev --respawn --transpile-only src/index.ts ",
|
||||
"build": "tsc && pnpm copy-assets",
|
||||
"copy-assets": "cp -r src/locales dist/locales",
|
||||
"start": "node dist/index.js",
|
||||
"start:ingestion-worker": "node dist/workers/ingestion.worker.js",
|
||||
"start:indexing-worker": "node dist/workers/indexing.worker.js",
|
||||
"start:sync-scheduler": "node dist/jobs/schedulers/sync-scheduler.js",
|
||||
"start:ingestion-worker:dev": "ts-node-dev --respawn --transpile-only src/workers/ingestion.worker.ts",
|
||||
"start:indexing-worker:dev": "ts-node-dev --respawn --transpile-only src/workers/indexing.worker.ts",
|
||||
"start:sync-scheduler:dev": "ts-node-dev --respawn --transpile-only src/jobs/schedulers/sync-scheduler.ts",
|
||||
"db:generate": "drizzle-kit generate --config=drizzle.config.ts",
|
||||
"db:push": "drizzle-kit push --config=drizzle.config.ts",
|
||||
"db:migrate": "node dist/database/migrate.js",
|
||||
"db:migrate:dev": "ts-node-dev src/database/migrate.ts"
|
||||
},
|
||||
"dependencies": {
|
||||
"@aws-sdk/client-s3": "^3.844.0",
|
||||
"@aws-sdk/lib-storage": "^3.844.0",
|
||||
"@azure/msal-node": "^3.6.3",
|
||||
"@casl/ability": "^6.7.3",
|
||||
"@microsoft/microsoft-graph-client": "^3.0.7",
|
||||
"@open-archiver/types": "workspace:*",
|
||||
"archiver": "^7.0.1",
|
||||
"axios": "^1.10.0",
|
||||
"bcryptjs": "^3.0.2",
|
||||
"bullmq": "^5.56.3",
|
||||
"busboy": "^1.6.0",
|
||||
"cross-fetch": "^4.1.0",
|
||||
"deepmerge-ts": "^7.1.5",
|
||||
"dotenv": "^17.2.0",
|
||||
"drizzle-kit": "^0.31.4",
|
||||
"drizzle-orm": "^0.44.2",
|
||||
"express": "^5.1.0",
|
||||
"express-rate-limit": "^8.0.1",
|
||||
"express-validator": "^7.2.1",
|
||||
"google-auth-library": "^10.1.0",
|
||||
"googleapis": "^152.0.0",
|
||||
"i18next": "^25.4.2",
|
||||
"i18next-fs-backend": "^2.6.0",
|
||||
"i18next-http-middleware": "^3.8.0",
|
||||
"imapflow": "^1.0.191",
|
||||
"jose": "^6.0.11",
|
||||
"mailparser": "^3.7.4",
|
||||
"mammoth": "^1.9.1",
|
||||
"meilisearch": "^0.51.0",
|
||||
"multer": "^2.0.2",
|
||||
"pdf2json": "^3.1.6",
|
||||
"pg": "^8.16.3",
|
||||
"pino": "^9.7.0",
|
||||
"pino-pretty": "^13.0.0",
|
||||
"postgres": "^3.4.7",
|
||||
"pst-extractor": "^1.11.0",
|
||||
"reflect-metadata": "^0.2.2",
|
||||
"sqlite3": "^5.1.7",
|
||||
"tsconfig-paths": "^4.2.0",
|
||||
"xlsx": "https://cdn.sheetjs.com/xlsx-0.20.3/xlsx-0.20.3.tgz",
|
||||
"yauzl": "^3.2.0",
|
||||
"zod": "^4.1.5"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@bull-board/api": "^6.11.0",
|
||||
"@bull-board/express": "^6.11.0",
|
||||
"@types/archiver": "^6.0.3",
|
||||
"@types/busboy": "^1.5.4",
|
||||
"@types/express": "^5.0.3",
|
||||
"@types/mailparser": "^3.4.6",
|
||||
"@types/microsoft-graph": "^2.40.1",
|
||||
"@types/multer": "^2.0.0",
|
||||
"@types/node": "^24.0.12",
|
||||
"@types/yauzl": "^2.10.3",
|
||||
"ts-node-dev": "^2.0.0",
|
||||
"typescript": "^5.8.3"
|
||||
}
|
||||
}
|
||||
|
||||
66
packages/backend/src/api/controllers/api-key.controller.ts
Normal file
66
packages/backend/src/api/controllers/api-key.controller.ts
Normal file
@@ -0,0 +1,66 @@
|
||||
import { Request, Response } from 'express';
|
||||
import { ApiKeyService } from '../../services/ApiKeyService';
|
||||
import { z } from 'zod';
|
||||
import { config } from '../../config';
|
||||
|
||||
const generateApiKeySchema = z.object({
|
||||
name: z
|
||||
.string()
|
||||
.min(1, 'API key name must be at least 1 character')
|
||||
.max(255, 'API key name must not be more than 255 characters'),
|
||||
expiresInDays: z
|
||||
.number()
|
||||
.int()
|
||||
.positive('Only positive numbers are allowed')
|
||||
.max(730, 'The API key must expire within 2 years / 730 days.'),
|
||||
});
|
||||
|
||||
export class ApiKeyController {
|
||||
public async generateApiKey(req: Request, res: Response) {
|
||||
if (config.app.isDemo) {
|
||||
return res.status(403).json({ message: req.t('errors.demoMode') });
|
||||
}
|
||||
try {
|
||||
const { name, expiresInDays } = generateApiKeySchema.parse(req.body);
|
||||
if (!req.user || !req.user.sub) {
|
||||
return res.status(401).json({ message: 'Unauthorized' });
|
||||
}
|
||||
const userId = req.user.sub;
|
||||
|
||||
const key = await ApiKeyService.generate(userId, name, expiresInDays);
|
||||
|
||||
res.status(201).json({ key });
|
||||
} catch (error) {
|
||||
if (error instanceof z.ZodError) {
|
||||
return res
|
||||
.status(400)
|
||||
.json({ message: req.t('api.requestBodyInvalid'), errors: error.message });
|
||||
}
|
||||
res.status(500).json({ message: req.t('errors.internalServerError') });
|
||||
}
|
||||
}
|
||||
|
||||
public async getApiKeys(req: Request, res: Response) {
|
||||
if (!req.user || !req.user.sub) {
|
||||
return res.status(401).json({ message: 'Unauthorized' });
|
||||
}
|
||||
const userId = req.user.sub;
|
||||
const keys = await ApiKeyService.getKeys(userId);
|
||||
|
||||
res.status(200).json(keys);
|
||||
}
|
||||
|
||||
public async deleteApiKey(req: Request, res: Response) {
|
||||
if (config.app.isDemo) {
|
||||
return res.status(403).json({ message: req.t('errors.demoMode') });
|
||||
}
|
||||
const { id } = req.params;
|
||||
if (!req.user || !req.user.sub) {
|
||||
return res.status(401).json({ message: 'Unauthorized' });
|
||||
}
|
||||
const userId = req.user.sub;
|
||||
await ApiKeyService.deleteKey(id, userId);
|
||||
|
||||
res.status(204).send({ message: req.t('apiKeys.deleteSuccess') });
|
||||
}
|
||||
}
|
||||
@@ -1,36 +1,69 @@
|
||||
import { Request, Response } from 'express';
|
||||
import { ArchivedEmailService } from '../../services/ArchivedEmailService';
|
||||
import { config } from '../../config';
|
||||
|
||||
export class ArchivedEmailController {
|
||||
public getArchivedEmails = async (req: Request, res: Response): Promise<Response> => {
|
||||
try {
|
||||
const { ingestionSourceId } = req.params;
|
||||
const page = parseInt(req.query.page as string, 10) || 1;
|
||||
const limit = parseInt(req.query.limit as string, 10) || 10;
|
||||
public getArchivedEmails = async (req: Request, res: Response): Promise<Response> => {
|
||||
try {
|
||||
const { ingestionSourceId } = req.params;
|
||||
const page = parseInt(req.query.page as string, 10) || 1;
|
||||
const limit = parseInt(req.query.limit as string, 10) || 10;
|
||||
const userId = req.user?.sub;
|
||||
|
||||
const result = await ArchivedEmailService.getArchivedEmails(
|
||||
ingestionSourceId,
|
||||
page,
|
||||
limit
|
||||
);
|
||||
return res.status(200).json(result);
|
||||
} catch (error) {
|
||||
console.error('Get archived emails error:', error);
|
||||
return res.status(500).json({ message: 'An internal server error occurred' });
|
||||
}
|
||||
};
|
||||
if (!userId) {
|
||||
return res.status(401).json({ message: req.t('errors.unauthorized') });
|
||||
}
|
||||
|
||||
public getArchivedEmailById = async (req: Request, res: Response): Promise<Response> => {
|
||||
try {
|
||||
const { id } = req.params;
|
||||
const email = await ArchivedEmailService.getArchivedEmailById(id);
|
||||
if (!email) {
|
||||
return res.status(404).json({ message: 'Archived email not found' });
|
||||
}
|
||||
return res.status(200).json(email);
|
||||
} catch (error) {
|
||||
console.error(`Get archived email by id ${req.params.id} error:`, error);
|
||||
return res.status(500).json({ message: 'An internal server error occurred' });
|
||||
}
|
||||
};
|
||||
const result = await ArchivedEmailService.getArchivedEmails(
|
||||
ingestionSourceId,
|
||||
page,
|
||||
limit,
|
||||
userId
|
||||
);
|
||||
return res.status(200).json(result);
|
||||
} catch (error) {
|
||||
console.error('Get archived emails error:', error);
|
||||
return res.status(500).json({ message: req.t('errors.internalServerError') });
|
||||
}
|
||||
};
|
||||
|
||||
public getArchivedEmailById = async (req: Request, res: Response): Promise<Response> => {
|
||||
try {
|
||||
const { id } = req.params;
|
||||
const userId = req.user?.sub;
|
||||
|
||||
if (!userId) {
|
||||
return res.status(401).json({ message: req.t('errors.unauthorized') });
|
||||
}
|
||||
|
||||
const email = await ArchivedEmailService.getArchivedEmailById(id, userId);
|
||||
if (!email) {
|
||||
return res.status(404).json({ message: req.t('archivedEmail.notFound') });
|
||||
}
|
||||
return res.status(200).json(email);
|
||||
} catch (error) {
|
||||
console.error(`Get archived email by id ${req.params.id} error:`, error);
|
||||
return res.status(500).json({ message: req.t('errors.internalServerError') });
|
||||
}
|
||||
};
|
||||
|
||||
public deleteArchivedEmail = async (req: Request, res: Response): Promise<Response> => {
|
||||
if (config.app.isDemo) {
|
||||
return res.status(403).json({ message: req.t('errors.demoMode') });
|
||||
}
|
||||
try {
|
||||
const { id } = req.params;
|
||||
await ArchivedEmailService.deleteArchivedEmail(id);
|
||||
return res.status(204).send();
|
||||
} catch (error) {
|
||||
console.error(`Delete archived email ${req.params.id} error:`, error);
|
||||
if (error instanceof Error) {
|
||||
if (error.message === 'Archived email not found') {
|
||||
return res.status(404).json({ message: req.t('archivedEmail.notFound') });
|
||||
}
|
||||
return res.status(500).json({ message: error.message });
|
||||
}
|
||||
return res.status(500).json({ message: req.t('errors.internalServerError') });
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,32 +1,130 @@
|
||||
import type { Request, Response } from 'express';
|
||||
import type { IAuthService } from '../../services/AuthService';
|
||||
import { AuthService } from '../../services/AuthService';
|
||||
import { UserService } from '../../services/UserService';
|
||||
import { IamService } from '../../services/IamService';
|
||||
import { db } from '../../database';
|
||||
import * as schema from '../../database/schema';
|
||||
import { eq, sql } from 'drizzle-orm';
|
||||
import 'dotenv/config';
|
||||
import { AuthorizationService } from '../../services/AuthorizationService';
|
||||
import { CaslPolicy } from '@open-archiver/types';
|
||||
|
||||
export class AuthController {
|
||||
#authService: IAuthService;
|
||||
#authService: AuthService;
|
||||
#userService: UserService;
|
||||
|
||||
constructor(authService: IAuthService) {
|
||||
this.#authService = authService;
|
||||
}
|
||||
constructor(authService: AuthService, userService: UserService) {
|
||||
this.#authService = authService;
|
||||
this.#userService = userService;
|
||||
}
|
||||
/**
|
||||
* Only used for setting up the instance, should only be displayed once upon instance set up.
|
||||
* @param req
|
||||
* @param res
|
||||
* @returns
|
||||
*/
|
||||
public setup = async (req: Request, res: Response): Promise<Response> => {
|
||||
const { email, password, first_name, last_name } = req.body;
|
||||
|
||||
public login = async (req: Request, res: Response): Promise<Response> => {
|
||||
const { email, password } = req.body;
|
||||
if (!email || !password || !first_name || !last_name) {
|
||||
return res.status(400).json({ message: req.t('auth.setup.allFieldsRequired') });
|
||||
}
|
||||
|
||||
if (!email || !password) {
|
||||
return res.status(400).json({ message: 'Email and password are required' });
|
||||
}
|
||||
try {
|
||||
const userCountResult = await db
|
||||
.select({ count: sql<number>`count(*)` })
|
||||
.from(schema.users);
|
||||
const userCount = Number(userCountResult[0].count);
|
||||
|
||||
try {
|
||||
const result = await this.#authService.login(email, password);
|
||||
if (userCount > 0) {
|
||||
return res.status(403).json({ message: req.t('auth.setup.alreadyCompleted') });
|
||||
}
|
||||
|
||||
if (!result) {
|
||||
return res.status(401).json({ message: 'Invalid credentials' });
|
||||
}
|
||||
const newUser = await this.#userService.createAdminUser(
|
||||
{ email, password, first_name, last_name },
|
||||
true
|
||||
);
|
||||
const result = await this.#authService.login(email, password);
|
||||
return res.status(201).json(result);
|
||||
} catch (error) {
|
||||
console.error('Setup error:', error);
|
||||
return res.status(500).json({ message: req.t('errors.internalServerError') });
|
||||
}
|
||||
};
|
||||
|
||||
return res.status(200).json(result);
|
||||
} catch (error) {
|
||||
// In a real application, you'd want to log this error.
|
||||
console.error('Login error:', error);
|
||||
return res.status(500).json({ message: 'An internal server error occurred' });
|
||||
}
|
||||
};
|
||||
public login = async (req: Request, res: Response): Promise<Response> => {
|
||||
const { email, password } = req.body;
|
||||
|
||||
if (!email || !password) {
|
||||
return res.status(400).json({ message: req.t('auth.login.emailAndPasswordRequired') });
|
||||
}
|
||||
|
||||
try {
|
||||
const result = await this.#authService.login(email, password);
|
||||
|
||||
if (!result) {
|
||||
return res.status(401).json({ message: req.t('auth.login.invalidCredentials') });
|
||||
}
|
||||
|
||||
return res.status(200).json(result);
|
||||
} catch (error) {
|
||||
console.error('Login error:', error);
|
||||
return res.status(500).json({ message: req.t('errors.internalServerError') });
|
||||
}
|
||||
};
|
||||
|
||||
public status = async (req: Request, res: Response): Promise<Response> => {
|
||||
try {
|
||||
const users = await db.select().from(schema.users);
|
||||
|
||||
/**
|
||||
* Check the situation where the only user has "Super Admin" role, but they don't actually have Super Admin permission because the role was set up in an earlier version, we need to change that "Super Admin" role to the one used in the current version.
|
||||
*/
|
||||
if (users.length === 1) {
|
||||
const iamService = new IamService();
|
||||
const userRoles = await iamService.getRolesForUser(users[0].id);
|
||||
if (userRoles.some((r) => r.name === 'Super Admin')) {
|
||||
const authorizationService = new AuthorizationService();
|
||||
const hasAdminPermission = await authorizationService.can(
|
||||
users[0].id,
|
||||
'manage',
|
||||
'all'
|
||||
);
|
||||
if (!hasAdminPermission) {
|
||||
const suerAdminPolicies: CaslPolicy[] = [
|
||||
{
|
||||
action: 'manage',
|
||||
subject: 'all',
|
||||
},
|
||||
];
|
||||
await db
|
||||
.update(schema.roles)
|
||||
.set({
|
||||
policies: suerAdminPolicies,
|
||||
slug: 'predefined_super_admin',
|
||||
})
|
||||
.where(eq(schema.roles.name, 'Super Admin'));
|
||||
}
|
||||
}
|
||||
}
|
||||
// in case user uses older version with admin user variables, we will create the admin user using those variables.
|
||||
const needsSetupUser = users.length === 0;
|
||||
if (needsSetupUser && process.env.ADMIN_EMAIL && process.env.ADMIN_PASSWORD) {
|
||||
await this.#userService.createAdminUser(
|
||||
{
|
||||
email: process.env.ADMIN_EMAIL,
|
||||
password: process.env.ADMIN_PASSWORD,
|
||||
first_name: 'Admin',
|
||||
last_name: 'User',
|
||||
},
|
||||
true
|
||||
);
|
||||
return res.status(200).json({ needsSetup: false });
|
||||
}
|
||||
return res.status(200).json({ needsSetup: needsSetupUser });
|
||||
} catch (error) {
|
||||
console.error('Status check error:', error);
|
||||
return res.status(500).json({ message: req.t('errors.internalServerError') });
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@@ -2,30 +2,30 @@ import { Request, Response } from 'express';
|
||||
import { dashboardService } from '../../services/DashboardService';
|
||||
|
||||
class DashboardController {
|
||||
public async getStats(req: Request, res: Response) {
|
||||
const stats = await dashboardService.getStats();
|
||||
res.json(stats);
|
||||
}
|
||||
public async getStats(req: Request, res: Response) {
|
||||
const stats = await dashboardService.getStats();
|
||||
res.json(stats);
|
||||
}
|
||||
|
||||
public async getIngestionHistory(req: Request, res: Response) {
|
||||
const history = await dashboardService.getIngestionHistory();
|
||||
res.json(history);
|
||||
}
|
||||
public async getIngestionHistory(req: Request, res: Response) {
|
||||
const history = await dashboardService.getIngestionHistory();
|
||||
res.json(history);
|
||||
}
|
||||
|
||||
public async getIngestionSources(req: Request, res: Response) {
|
||||
const sources = await dashboardService.getIngestionSources();
|
||||
res.json(sources);
|
||||
}
|
||||
public async getIngestionSources(req: Request, res: Response) {
|
||||
const sources = await dashboardService.getIngestionSources();
|
||||
res.json(sources);
|
||||
}
|
||||
|
||||
public async getRecentSyncs(req: Request, res: Response) {
|
||||
const syncs = await dashboardService.getRecentSyncs();
|
||||
res.json(syncs);
|
||||
}
|
||||
public async getRecentSyncs(req: Request, res: Response) {
|
||||
const syncs = await dashboardService.getRecentSyncs();
|
||||
res.json(syncs);
|
||||
}
|
||||
|
||||
public async getIndexedInsights(req: Request, res: Response) {
|
||||
const insights = await dashboardService.getIndexedInsights();
|
||||
res.json(insights);
|
||||
}
|
||||
public async getIndexedInsights(req: Request, res: Response) {
|
||||
const insights = await dashboardService.getIndexedInsights();
|
||||
res.json(insights);
|
||||
}
|
||||
}
|
||||
|
||||
export const dashboardController = new DashboardController();
|
||||
|
||||
161
packages/backend/src/api/controllers/iam.controller.ts
Normal file
161
packages/backend/src/api/controllers/iam.controller.ts
Normal file
@@ -0,0 +1,161 @@
|
||||
import { Request, Response } from 'express';
|
||||
import { IamService } from '../../services/IamService';
|
||||
import { PolicyValidator } from '../../iam-policy/policy-validator';
|
||||
import type { CaslPolicy } from '@open-archiver/types';
|
||||
import { logger } from '../../config/logger';
|
||||
import { config } from '../../config';
|
||||
|
||||
export class IamController {
|
||||
#iamService: IamService;
|
||||
|
||||
constructor(iamService: IamService) {
|
||||
this.#iamService = iamService;
|
||||
}
|
||||
|
||||
public getRoles = async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
let roles = await this.#iamService.getRoles();
|
||||
if (!roles.some((r) => r.slug?.includes('predefined_'))) {
|
||||
// create pre defined roles
|
||||
logger.info({}, 'Creating predefined roles');
|
||||
await this.createDefaultRoles();
|
||||
}
|
||||
res.status(200).json(roles);
|
||||
} catch (error) {
|
||||
res.status(500).json({ message: req.t('iam.failedToGetRoles') });
|
||||
}
|
||||
};
|
||||
|
||||
public getRoleById = async (req: Request, res: Response): Promise<void> => {
|
||||
const { id } = req.params;
|
||||
|
||||
try {
|
||||
const role = await this.#iamService.getRoleById(id);
|
||||
if (role) {
|
||||
res.status(200).json(role);
|
||||
} else {
|
||||
res.status(404).json({ message: req.t('iam.roleNotFound') });
|
||||
}
|
||||
} catch (error) {
|
||||
res.status(500).json({ message: req.t('iam.failedToGetRole') });
|
||||
}
|
||||
};
|
||||
|
||||
public createRole = async (req: Request, res: Response) => {
|
||||
if (config.app.isDemo) {
|
||||
return res.status(403).json({ message: req.t('errors.demoMode') });
|
||||
}
|
||||
const { name, policies } = req.body;
|
||||
|
||||
if (!name || !policies) {
|
||||
res.status(400).json({ message: req.t('iam.missingRoleFields') });
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
for (const statement of policies) {
|
||||
const { valid, reason } = PolicyValidator.isValid(statement as CaslPolicy);
|
||||
if (!valid) {
|
||||
res.status(400).json({ message: `${req.t('iam.invalidPolicy')} ${reason}` });
|
||||
return;
|
||||
}
|
||||
}
|
||||
const role = await this.#iamService.createRole(name, policies);
|
||||
res.status(201).json(role);
|
||||
} catch (error) {
|
||||
console.log(error);
|
||||
res.status(500).json({ message: req.t('iam.failedToCreateRole') });
|
||||
}
|
||||
};
|
||||
|
||||
public deleteRole = async (req: Request, res: Response) => {
|
||||
if (config.app.isDemo) {
|
||||
return res.status(403).json({ message: req.t('errors.demoMode') });
|
||||
}
|
||||
const { id } = req.params;
|
||||
|
||||
try {
|
||||
await this.#iamService.deleteRole(id);
|
||||
res.status(204).send();
|
||||
} catch (error) {
|
||||
res.status(500).json({ message: req.t('iam.failedToDeleteRole') });
|
||||
}
|
||||
};
|
||||
|
||||
public updateRole = async (req: Request, res: Response) => {
|
||||
if (config.app.isDemo) {
|
||||
return res.status(403).json({ message: req.t('errors.demoMode') });
|
||||
}
|
||||
const { id } = req.params;
|
||||
const { name, policies } = req.body;
|
||||
|
||||
if (!name && !policies) {
|
||||
res.status(400).json({ message: req.t('iam.missingUpdateFields') });
|
||||
return;
|
||||
}
|
||||
|
||||
if (policies) {
|
||||
for (const statement of policies) {
|
||||
const { valid, reason } = PolicyValidator.isValid(statement as CaslPolicy);
|
||||
if (!valid) {
|
||||
res.status(400).json({ message: `${req.t('iam.invalidPolicy')} ${reason}` });
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
const role = await this.#iamService.updateRole(id, { name, policies });
|
||||
res.status(200).json(role);
|
||||
} catch (error) {
|
||||
res.status(500).json({ message: req.t('iam.failedToUpdateRole') });
|
||||
}
|
||||
};
|
||||
|
||||
private createDefaultRoles = async () => {
|
||||
try {
|
||||
// end user who can manage its own data, and create new ingestions.
|
||||
await this.#iamService.createRole(
|
||||
'End user',
|
||||
[
|
||||
{
|
||||
action: 'read',
|
||||
subject: 'dashboard',
|
||||
},
|
||||
{
|
||||
action: 'create',
|
||||
subject: 'ingestion',
|
||||
},
|
||||
{
|
||||
action: 'manage',
|
||||
subject: 'ingestion',
|
||||
conditions: {
|
||||
userId: '${user.id}',
|
||||
},
|
||||
},
|
||||
{
|
||||
action: 'manage',
|
||||
subject: 'archive',
|
||||
conditions: {
|
||||
'ingestionSource.userId': '${user.id}',
|
||||
},
|
||||
},
|
||||
],
|
||||
'predefined_end_user'
|
||||
);
|
||||
// read only
|
||||
await this.#iamService.createRole(
|
||||
'Read only',
|
||||
[
|
||||
{
|
||||
action: ['read', 'search'],
|
||||
subject: ['ingestion', 'archive', 'dashboard', 'users', 'roles'],
|
||||
},
|
||||
],
|
||||
'predefined_read_only_user'
|
||||
);
|
||||
} catch (error) {
|
||||
logger.error({}, 'Failed to create default roles');
|
||||
}
|
||||
};
|
||||
}
|
||||
@@ -1,97 +1,163 @@
|
||||
import { Request, Response } from 'express';
|
||||
import { IngestionService } from '../../services/IngestionService';
|
||||
import { CreateIngestionSourceDto, UpdateIngestionSourceDto } from '@open-archiver/types';
|
||||
import {
|
||||
CreateIngestionSourceDto,
|
||||
UpdateIngestionSourceDto,
|
||||
IngestionSource,
|
||||
SafeIngestionSource,
|
||||
} from '@open-archiver/types';
|
||||
import { logger } from '../../config/logger';
|
||||
import { config } from '../../config';
|
||||
|
||||
export class IngestionController {
|
||||
public create = async (req: Request, res: Response): Promise<Response> => {
|
||||
try {
|
||||
const dto: CreateIngestionSourceDto = req.body;
|
||||
const newSource = await IngestionService.create(dto);
|
||||
return res.status(201).json(newSource);
|
||||
} catch (error) {
|
||||
console.error('Create ingestion source error:', error);
|
||||
return res.status(500).json({ message: 'An internal server error occurred' });
|
||||
}
|
||||
};
|
||||
/**
|
||||
* Converts an IngestionSource object to a safe version for client-side consumption
|
||||
* by removing the credentials.
|
||||
* @param source The full IngestionSource object.
|
||||
* @returns An object conforming to the SafeIngestionSource type.
|
||||
*/
|
||||
private toSafeIngestionSource(source: IngestionSource): SafeIngestionSource {
|
||||
const { credentials, ...safeSource } = source;
|
||||
return safeSource;
|
||||
}
|
||||
|
||||
public findAll = async (req: Request, res: Response): Promise<Response> => {
|
||||
try {
|
||||
const sources = await IngestionService.findAll();
|
||||
return res.status(200).json(sources);
|
||||
} catch (error) {
|
||||
console.error('Find all ingestion sources error:', error);
|
||||
return res.status(500).json({ message: 'An internal server error occurred' });
|
||||
}
|
||||
};
|
||||
public create = async (req: Request, res: Response): Promise<Response> => {
|
||||
if (config.app.isDemo) {
|
||||
return res.status(403).json({ message: req.t('errors.demoMode') });
|
||||
}
|
||||
try {
|
||||
const dto: CreateIngestionSourceDto = req.body;
|
||||
const userId = req.user?.sub;
|
||||
if (!userId) {
|
||||
return res.status(401).json({ message: req.t('errors.unauthorized') });
|
||||
}
|
||||
const newSource = await IngestionService.create(dto, userId);
|
||||
const safeSource = this.toSafeIngestionSource(newSource);
|
||||
return res.status(201).json(safeSource);
|
||||
} catch (error: any) {
|
||||
logger.error({ err: error }, 'Create ingestion source error');
|
||||
// Return a 400 Bad Request for connection errors
|
||||
return res.status(400).json({
|
||||
message: error.message || req.t('ingestion.failedToCreate'),
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
public findById = async (req: Request, res: Response): Promise<Response> => {
|
||||
try {
|
||||
const { id } = req.params;
|
||||
const source = await IngestionService.findById(id);
|
||||
return res.status(200).json(source);
|
||||
} catch (error) {
|
||||
console.error(`Find ingestion source by id ${req.params.id} error:`, error);
|
||||
if (error instanceof Error && error.message === 'Ingestion source not found') {
|
||||
return res.status(404).json({ message: error.message });
|
||||
}
|
||||
return res.status(500).json({ message: 'An internal server error occurred' });
|
||||
}
|
||||
};
|
||||
public findAll = async (req: Request, res: Response): Promise<Response> => {
|
||||
try {
|
||||
const userId = req.user?.sub;
|
||||
if (!userId) {
|
||||
return res.status(401).json({ message: req.t('errors.unauthorized') });
|
||||
}
|
||||
const sources = await IngestionService.findAll(userId);
|
||||
const safeSources = sources.map(this.toSafeIngestionSource);
|
||||
return res.status(200).json(safeSources);
|
||||
} catch (error) {
|
||||
console.error('Find all ingestion sources error:', error);
|
||||
return res.status(500).json({ message: req.t('errors.internalServerError') });
|
||||
}
|
||||
};
|
||||
|
||||
public update = async (req: Request, res: Response): Promise<Response> => {
|
||||
try {
|
||||
const { id } = req.params;
|
||||
const dto: UpdateIngestionSourceDto = req.body;
|
||||
const updatedSource = await IngestionService.update(id, dto);
|
||||
return res.status(200).json(updatedSource);
|
||||
} catch (error) {
|
||||
console.error(`Update ingestion source ${req.params.id} error:`, error);
|
||||
if (error instanceof Error && error.message === 'Ingestion source not found') {
|
||||
return res.status(404).json({ message: error.message });
|
||||
}
|
||||
return res.status(500).json({ message: 'An internal server error occurred' });
|
||||
}
|
||||
};
|
||||
public findById = async (req: Request, res: Response): Promise<Response> => {
|
||||
try {
|
||||
const { id } = req.params;
|
||||
const source = await IngestionService.findById(id);
|
||||
const safeSource = this.toSafeIngestionSource(source);
|
||||
return res.status(200).json(safeSource);
|
||||
} catch (error) {
|
||||
console.error(`Find ingestion source by id ${req.params.id} error:`, error);
|
||||
if (error instanceof Error && error.message === 'Ingestion source not found') {
|
||||
return res.status(404).json({ message: req.t('ingestion.notFound') });
|
||||
}
|
||||
return res.status(500).json({ message: req.t('errors.internalServerError') });
|
||||
}
|
||||
};
|
||||
|
||||
public delete = async (req: Request, res: Response): Promise<Response> => {
|
||||
try {
|
||||
const { id } = req.params;
|
||||
await IngestionService.delete(id);
|
||||
return res.status(204).send();
|
||||
} catch (error) {
|
||||
console.error(`Delete ingestion source ${req.params.id} error:`, error);
|
||||
if (error instanceof Error && error.message === 'Ingestion source not found') {
|
||||
return res.status(404).json({ message: error.message });
|
||||
}
|
||||
return res.status(500).json({ message: 'An internal server error occurred' });
|
||||
}
|
||||
};
|
||||
public update = async (req: Request, res: Response): Promise<Response> => {
|
||||
if (config.app.isDemo) {
|
||||
return res.status(403).json({ message: req.t('errors.demoMode') });
|
||||
}
|
||||
try {
|
||||
const { id } = req.params;
|
||||
const dto: UpdateIngestionSourceDto = req.body;
|
||||
const updatedSource = await IngestionService.update(id, dto);
|
||||
const safeSource = this.toSafeIngestionSource(updatedSource);
|
||||
return res.status(200).json(safeSource);
|
||||
} catch (error) {
|
||||
console.error(`Update ingestion source ${req.params.id} error:`, error);
|
||||
if (error instanceof Error && error.message === 'Ingestion source not found') {
|
||||
return res.status(404).json({ message: req.t('ingestion.notFound') });
|
||||
}
|
||||
return res.status(500).json({ message: req.t('errors.internalServerError') });
|
||||
}
|
||||
};
|
||||
|
||||
public triggerInitialImport = async (req: Request, res: Response): Promise<Response> => {
|
||||
try {
|
||||
const { id } = req.params;
|
||||
await IngestionService.triggerInitialImport(id);
|
||||
return res.status(202).json({ message: 'Initial import triggered successfully.' });
|
||||
} catch (error) {
|
||||
console.error(`Trigger initial import for ${req.params.id} error:`, error);
|
||||
if (error instanceof Error && error.message === 'Ingestion source not found') {
|
||||
return res.status(404).json({ message: error.message });
|
||||
}
|
||||
return res.status(500).json({ message: 'An internal server error occurred' });
|
||||
}
|
||||
};
|
||||
public delete = async (req: Request, res: Response): Promise<Response> => {
|
||||
if (config.app.isDemo) {
|
||||
return res.status(403).json({ message: req.t('errors.demoMode') });
|
||||
}
|
||||
try {
|
||||
const { id } = req.params;
|
||||
await IngestionService.delete(id);
|
||||
return res.status(204).send();
|
||||
} catch (error) {
|
||||
console.error(`Delete ingestion source ${req.params.id} error:`, error);
|
||||
if (error instanceof Error && error.message === 'Ingestion source not found') {
|
||||
return res.status(404).json({ message: req.t('ingestion.notFound') });
|
||||
}
|
||||
return res.status(500).json({ message: req.t('errors.internalServerError') });
|
||||
}
|
||||
};
|
||||
|
||||
public pause = async (req: Request, res: Response): Promise<Response> => {
|
||||
try {
|
||||
const { id } = req.params;
|
||||
const updatedSource = await IngestionService.update(id, { status: 'paused' });
|
||||
return res.status(200).json(updatedSource);
|
||||
} catch (error) {
|
||||
console.error(`Pause ingestion source ${req.params.id} error:`, error);
|
||||
if (error instanceof Error && error.message === 'Ingestion source not found') {
|
||||
return res.status(404).json({ message: error.message });
|
||||
}
|
||||
return res.status(500).json({ message: 'An internal server error occurred' });
|
||||
}
|
||||
};
|
||||
public triggerInitialImport = async (req: Request, res: Response): Promise<Response> => {
|
||||
if (config.app.isDemo) {
|
||||
return res.status(403).json({ message: req.t('errors.demoMode') });
|
||||
}
|
||||
try {
|
||||
const { id } = req.params;
|
||||
await IngestionService.triggerInitialImport(id);
|
||||
return res.status(202).json({ message: req.t('ingestion.initialImportTriggered') });
|
||||
} catch (error) {
|
||||
console.error(`Trigger initial import for ${req.params.id} error:`, error);
|
||||
if (error instanceof Error && error.message === 'Ingestion source not found') {
|
||||
return res.status(404).json({ message: req.t('ingestion.notFound') });
|
||||
}
|
||||
return res.status(500).json({ message: req.t('errors.internalServerError') });
|
||||
}
|
||||
};
|
||||
|
||||
public pause = async (req: Request, res: Response): Promise<Response> => {
|
||||
if (config.app.isDemo) {
|
||||
return res.status(403).json({ message: req.t('errors.demoMode') });
|
||||
}
|
||||
try {
|
||||
const { id } = req.params;
|
||||
const updatedSource = await IngestionService.update(id, { status: 'paused' });
|
||||
const safeSource = this.toSafeIngestionSource(updatedSource);
|
||||
return res.status(200).json(safeSource);
|
||||
} catch (error) {
|
||||
console.error(`Pause ingestion source ${req.params.id} error:`, error);
|
||||
if (error instanceof Error && error.message === 'Ingestion source not found') {
|
||||
return res.status(404).json({ message: req.t('ingestion.notFound') });
|
||||
}
|
||||
return res.status(500).json({ message: req.t('errors.internalServerError') });
|
||||
}
|
||||
};
|
||||
|
||||
public triggerForceSync = async (req: Request, res: Response): Promise<Response> => {
|
||||
if (config.app.isDemo) {
|
||||
return res.status(403).json({ message: req.t('errors.demoMode') });
|
||||
}
|
||||
try {
|
||||
const { id } = req.params;
|
||||
await IngestionService.triggerForceSync(id);
|
||||
return res.status(202).json({ message: req.t('ingestion.forceSyncTriggered') });
|
||||
} catch (error) {
|
||||
console.error(`Trigger force sync for ${req.params.id} error:`, error);
|
||||
if (error instanceof Error && error.message === 'Ingestion source not found') {
|
||||
return res.status(404).json({ message: req.t('ingestion.notFound') });
|
||||
}
|
||||
return res.status(500).json({ message: req.t('errors.internalServerError') });
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@@ -3,32 +3,41 @@ import { SearchService } from '../../services/SearchService';
|
||||
import { MatchingStrategies } from 'meilisearch';
|
||||
|
||||
export class SearchController {
|
||||
private searchService: SearchService;
|
||||
private searchService: SearchService;
|
||||
|
||||
constructor() {
|
||||
this.searchService = new SearchService();
|
||||
}
|
||||
constructor() {
|
||||
this.searchService = new SearchService();
|
||||
}
|
||||
|
||||
public search = async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const { keywords, page, limit, matchingStrategy } = req.query;
|
||||
public search = async (req: Request, res: Response): Promise<void> => {
|
||||
try {
|
||||
const { keywords, page, limit, matchingStrategy } = req.query;
|
||||
const userId = req.user?.sub;
|
||||
|
||||
if (!keywords) {
|
||||
res.status(400).json({ message: 'Keywords are required' });
|
||||
return;
|
||||
}
|
||||
if (!userId) {
|
||||
res.status(401).json({ message: req.t('errors.unauthorized') });
|
||||
return;
|
||||
}
|
||||
|
||||
const results = await this.searchService.searchEmails({
|
||||
query: keywords as string,
|
||||
page: page ? parseInt(page as string) : 1,
|
||||
limit: limit ? parseInt(limit as string) : 10,
|
||||
matchingStrategy: matchingStrategy as MatchingStrategies
|
||||
});
|
||||
if (!keywords) {
|
||||
res.status(400).json({ message: req.t('search.keywordsRequired') });
|
||||
return;
|
||||
}
|
||||
|
||||
res.status(200).json(results);
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : 'An unknown error occurred';
|
||||
res.status(500).json({ message });
|
||||
}
|
||||
};
|
||||
const results = await this.searchService.searchEmails(
|
||||
{
|
||||
query: keywords as string,
|
||||
page: page ? parseInt(page as string) : 1,
|
||||
limit: limit ? parseInt(limit as string) : 10,
|
||||
matchingStrategy: matchingStrategy as MatchingStrategies,
|
||||
},
|
||||
userId
|
||||
);
|
||||
|
||||
res.status(200).json(results);
|
||||
} catch (error) {
|
||||
const message = error instanceof Error ? error.message : req.t('errors.unknown');
|
||||
res.status(500).json({ message });
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
29
packages/backend/src/api/controllers/settings.controller.ts
Normal file
29
packages/backend/src/api/controllers/settings.controller.ts
Normal file
@@ -0,0 +1,29 @@
|
||||
import type { Request, Response } from 'express';
|
||||
import { SettingsService } from '../../services/SettingsService';
|
||||
import { config } from '../../config';
|
||||
|
||||
const settingsService = new SettingsService();
|
||||
|
||||
export const getSystemSettings = async (req: Request, res: Response) => {
|
||||
try {
|
||||
const settings = await settingsService.getSystemSettings();
|
||||
res.status(200).json(settings);
|
||||
} catch (error) {
|
||||
// A more specific error could be logged here
|
||||
res.status(500).json({ message: req.t('settings.failedToRetrieve') });
|
||||
}
|
||||
};
|
||||
|
||||
export const updateSystemSettings = async (req: Request, res: Response) => {
|
||||
try {
|
||||
// Basic validation can be performed here if necessary
|
||||
if (config.app.isDemo) {
|
||||
return res.status(403).json({ message: req.t('errors.demoMode') });
|
||||
}
|
||||
const updatedSettings = await settingsService.updateSystemSettings(req.body);
|
||||
res.status(200).json(updatedSettings);
|
||||
} catch (error) {
|
||||
// A more specific error could be logged here
|
||||
res.status(500).json({ message: req.t('settings.failedToUpdate') });
|
||||
}
|
||||
};
|
||||
@@ -1,32 +1,50 @@
|
||||
import { Request, Response } from 'express';
|
||||
import { StorageService } from '../../services/StorageService';
|
||||
import * as path from 'path';
|
||||
import { storage as storageConfig } from '../../config/storage';
|
||||
|
||||
export class StorageController {
|
||||
constructor(private storageService: StorageService) { }
|
||||
constructor(private storageService: StorageService) {}
|
||||
|
||||
public downloadFile = async (req: Request, res: Response): Promise<void> => {
|
||||
const filePath = req.query.path as string;
|
||||
public downloadFile = async (req: Request, res: Response): Promise<void> => {
|
||||
const unsafePath = req.query.path as string;
|
||||
|
||||
if (!filePath) {
|
||||
res.status(400).send('File path is required');
|
||||
return;
|
||||
}
|
||||
if (!unsafePath) {
|
||||
res.status(400).send(req.t('storage.filePathRequired'));
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const fileExists = await this.storageService.exists(filePath);
|
||||
if (!fileExists) {
|
||||
console.log(filePath);
|
||||
res.status(404).send('File not found');
|
||||
return;
|
||||
}
|
||||
// Normalize the path to prevent directory traversal
|
||||
const normalizedPath = path.normalize(unsafePath).replace(/^(\.\.(\/|\\|$))+/, '');
|
||||
|
||||
const fileStream = await this.storageService.get(filePath);
|
||||
const fileName = filePath.split('/').pop();
|
||||
res.setHeader('Content-Disposition', `attachment; filename=${fileName}`);
|
||||
fileStream.pipe(res);
|
||||
} catch (error) {
|
||||
console.error('Error downloading file:', error);
|
||||
res.status(500).send('Error downloading file');
|
||||
}
|
||||
};
|
||||
// Determine the base path from storage configuration
|
||||
const basePath = storageConfig.type === 'local' ? storageConfig.rootPath : '/';
|
||||
|
||||
// Resolve the full path and ensure it's within the storage directory
|
||||
const fullPath = path.join(basePath, normalizedPath);
|
||||
|
||||
if (!fullPath.startsWith(basePath)) {
|
||||
res.status(400).send(req.t('storage.invalidFilePath'));
|
||||
return;
|
||||
}
|
||||
|
||||
// Use the sanitized, relative path for storage service operations
|
||||
const safePath = path.relative(basePath, fullPath);
|
||||
|
||||
try {
|
||||
const fileExists = await this.storageService.exists(safePath);
|
||||
if (!fileExists) {
|
||||
res.status(404).send(req.t('storage.fileNotFound'));
|
||||
return;
|
||||
}
|
||||
|
||||
const fileStream = await this.storageService.get(safePath);
|
||||
const fileName = path.basename(safePath);
|
||||
res.setHeader('Content-Disposition', `attachment; filename="${fileName}"`);
|
||||
fileStream.pipe(res);
|
||||
} catch (error) {
|
||||
console.error('Error downloading file:', error);
|
||||
res.status(500).send(req.t('storage.downloadError'));
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
25
packages/backend/src/api/controllers/upload.controller.ts
Normal file
25
packages/backend/src/api/controllers/upload.controller.ts
Normal file
@@ -0,0 +1,25 @@
|
||||
import { Request, Response } from 'express';
|
||||
import { StorageService } from '../../services/StorageService';
|
||||
import { randomUUID } from 'crypto';
|
||||
import busboy from 'busboy';
|
||||
import { config } from '../../config/index';
|
||||
|
||||
export const uploadFile = async (req: Request, res: Response) => {
|
||||
const storage = new StorageService();
|
||||
const bb = busboy({ headers: req.headers });
|
||||
let filePath = '';
|
||||
let originalFilename = '';
|
||||
|
||||
bb.on('file', (fieldname, file, filename) => {
|
||||
originalFilename = filename.filename;
|
||||
const uuid = randomUUID();
|
||||
filePath = `${config.storage.openArchiverFolderName}/tmp/${uuid}-${originalFilename}`;
|
||||
storage.put(filePath, file);
|
||||
});
|
||||
|
||||
bb.on('finish', () => {
|
||||
res.json({ filePath });
|
||||
});
|
||||
|
||||
req.pipe(bb);
|
||||
};
|
||||
66
packages/backend/src/api/controllers/user.controller.ts
Normal file
66
packages/backend/src/api/controllers/user.controller.ts
Normal file
@@ -0,0 +1,66 @@
|
||||
import { Request, Response } from 'express';
|
||||
import { UserService } from '../../services/UserService';
|
||||
import * as schema from '../../database/schema';
|
||||
import { sql } from 'drizzle-orm';
|
||||
import { db } from '../../database';
|
||||
import { config } from '../../config';
|
||||
|
||||
const userService = new UserService();
|
||||
|
||||
export const getUsers = async (req: Request, res: Response) => {
|
||||
const users = await userService.findAll();
|
||||
res.json(users);
|
||||
};
|
||||
|
||||
export const getUser = async (req: Request, res: Response) => {
|
||||
const user = await userService.findById(req.params.id);
|
||||
if (!user) {
|
||||
return res.status(404).json({ message: req.t('user.notFound') });
|
||||
}
|
||||
res.json(user);
|
||||
};
|
||||
|
||||
export const createUser = async (req: Request, res: Response) => {
|
||||
if (config.app.isDemo) {
|
||||
return res.status(403).json({ message: req.t('errors.demoMode') });
|
||||
}
|
||||
const { email, first_name, last_name, password, roleId } = req.body;
|
||||
|
||||
const newUser = await userService.createUser(
|
||||
{ email, first_name, last_name, password },
|
||||
roleId
|
||||
);
|
||||
res.status(201).json(newUser);
|
||||
};
|
||||
|
||||
export const updateUser = async (req: Request, res: Response) => {
|
||||
if (config.app.isDemo) {
|
||||
return res.status(403).json({ message: req.t('errors.demoMode') });
|
||||
}
|
||||
const { email, first_name, last_name, roleId } = req.body;
|
||||
const updatedUser = await userService.updateUser(
|
||||
req.params.id,
|
||||
{ email, first_name, last_name },
|
||||
roleId
|
||||
);
|
||||
if (!updatedUser) {
|
||||
return res.status(404).json({ message: req.t('user.notFound') });
|
||||
}
|
||||
res.json(updatedUser);
|
||||
};
|
||||
|
||||
export const deleteUser = async (req: Request, res: Response) => {
|
||||
if (config.app.isDemo) {
|
||||
return res.status(403).json({ message: req.t('errors.demoMode') });
|
||||
}
|
||||
const userCountResult = await db.select({ count: sql<number>`count(*)` }).from(schema.users);
|
||||
|
||||
const isOnlyUser = Number(userCountResult[0].count) === 1;
|
||||
if (isOnlyUser) {
|
||||
return res.status(400).json({
|
||||
message: req.t('user.cannotDeleteOnlyUser'),
|
||||
});
|
||||
}
|
||||
await userService.deleteUser(req.params.id);
|
||||
res.status(204).send();
|
||||
};
|
||||
16
packages/backend/src/api/middleware/rateLimiter.ts
Normal file
16
packages/backend/src/api/middleware/rateLimiter.ts
Normal file
@@ -0,0 +1,16 @@
|
||||
import rateLimit from 'express-rate-limit';
|
||||
import { config } from '../../config';
|
||||
|
||||
const windowInMinutes = Math.ceil(config.api.rateLimit.windowMs / 60000);
|
||||
|
||||
export const rateLimiter = rateLimit({
|
||||
windowMs: config.api.rateLimit.windowMs,
|
||||
max: config.api.rateLimit.max,
|
||||
message: {
|
||||
status: 429,
|
||||
message: `Too many requests from this IP, please try again after ${windowInMinutes} minutes`,
|
||||
},
|
||||
statusCode: 429,
|
||||
standardHeaders: true,
|
||||
legacyHeaders: false,
|
||||
});
|
||||
@@ -1,39 +1,58 @@
|
||||
import type { Request, Response, NextFunction } from 'express';
|
||||
import type { IAuthService } from '../../services/AuthService';
|
||||
import type { AuthService } from '../../services/AuthService';
|
||||
import type { AuthTokenPayload } from '@open-archiver/types';
|
||||
import 'dotenv/config';
|
||||
import { ApiKeyService } from '../../services/ApiKeyService';
|
||||
import { UserService } from '../../services/UserService';
|
||||
|
||||
// By using module augmentation, we can add our custom 'user' property
|
||||
// to the Express Request interface in a type-safe way.
|
||||
declare global {
|
||||
namespace Express {
|
||||
export interface Request {
|
||||
user?: AuthTokenPayload;
|
||||
}
|
||||
}
|
||||
namespace Express {
|
||||
export interface Request {
|
||||
user?: AuthTokenPayload;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export const requireAuth = (authService: IAuthService) => {
|
||||
return async (req: Request, res: Response, next: NextFunction) => {
|
||||
const authHeader = req.headers.authorization;
|
||||
if (!authHeader || !authHeader.startsWith('Bearer ')) {
|
||||
return res.status(401).json({ message: 'Unauthorized: No token provided' });
|
||||
}
|
||||
const token = authHeader.split(' ')[1];
|
||||
try {
|
||||
// use a SUPER_API_KEY for all authentications.
|
||||
if (token === process.env.SUPER_API_KEY) {
|
||||
next();
|
||||
return;
|
||||
}
|
||||
const payload = await authService.verifyToken(token);
|
||||
if (!payload) {
|
||||
return res.status(401).json({ message: 'Unauthorized: Invalid token' });
|
||||
}
|
||||
req.user = payload;
|
||||
next();
|
||||
} catch (error) {
|
||||
console.error('Authentication error:', error);
|
||||
return res.status(500).json({ message: 'An internal server error occurred during authentication' });
|
||||
}
|
||||
};
|
||||
export const requireAuth = (authService: AuthService) => {
|
||||
return async (req: Request, res: Response, next: NextFunction) => {
|
||||
const authHeader = req.headers.authorization;
|
||||
const apiKeyHeader = req.headers['x-api-key'];
|
||||
|
||||
if (apiKeyHeader) {
|
||||
const userId = await ApiKeyService.validateKey(apiKeyHeader as string);
|
||||
if (!userId) {
|
||||
return res.status(401).json({ message: 'Unauthorized: Invalid API key' });
|
||||
}
|
||||
const user = await new UserService().findById(userId);
|
||||
if (!user) {
|
||||
return res.status(401).json({ message: 'Unauthorized: Invalid user' });
|
||||
}
|
||||
req.user = {
|
||||
sub: user.id,
|
||||
email: user.email,
|
||||
roles: user.role ? [user.role.name] : [],
|
||||
};
|
||||
return next();
|
||||
}
|
||||
|
||||
if (!authHeader || !authHeader.startsWith('Bearer ')) {
|
||||
return res.status(401).json({ message: 'Unauthorized: No token provided' });
|
||||
}
|
||||
const token = authHeader.split(' ')[1];
|
||||
try {
|
||||
const payload = await authService.verifyToken(token);
|
||||
if (!payload) {
|
||||
return res.status(401).json({ message: 'Unauthorized: Invalid token' });
|
||||
}
|
||||
req.user = payload;
|
||||
next();
|
||||
} catch (error) {
|
||||
console.error('Authentication error:', error);
|
||||
return res
|
||||
.status(500)
|
||||
.json({ message: 'An internal server error occurred during authentication' });
|
||||
}
|
||||
};
|
||||
};
|
||||
|
||||
38
packages/backend/src/api/middleware/requirePermission.ts
Normal file
38
packages/backend/src/api/middleware/requirePermission.ts
Normal file
@@ -0,0 +1,38 @@
|
||||
import { AuthorizationService } from '../../services/AuthorizationService';
|
||||
import type { Request, Response, NextFunction } from 'express';
|
||||
import { AppActions, AppSubjects } from '@open-archiver/types';
|
||||
|
||||
export const requirePermission = (
|
||||
action: AppActions,
|
||||
subjectName: AppSubjects,
|
||||
rejectMessage?: string
|
||||
) => {
|
||||
return async (req: Request, res: Response, next: NextFunction) => {
|
||||
const userId = req.user?.sub;
|
||||
|
||||
if (!userId) {
|
||||
return res.status(401).json({ message: 'Unauthorized' });
|
||||
}
|
||||
|
||||
let resourceObject = undefined;
|
||||
// Logic to fetch resourceObject if needed for condition-based checks...
|
||||
const authorizationService = new AuthorizationService();
|
||||
const hasPermission = await authorizationService.can(
|
||||
userId,
|
||||
action,
|
||||
subjectName,
|
||||
resourceObject
|
||||
);
|
||||
|
||||
if (!hasPermission) {
|
||||
const message = rejectMessage
|
||||
? req.t(rejectMessage)
|
||||
: req.t('errors.noPermissionToAction');
|
||||
return res.status(403).json({
|
||||
message,
|
||||
});
|
||||
}
|
||||
|
||||
next();
|
||||
};
|
||||
};
|
||||
15
packages/backend/src/api/routes/api-key.routes.ts
Normal file
15
packages/backend/src/api/routes/api-key.routes.ts
Normal file
@@ -0,0 +1,15 @@
|
||||
import { Router } from 'express';
|
||||
import { ApiKeyController } from '../controllers/api-key.controller';
|
||||
import { requireAuth } from '../middleware/requireAuth';
|
||||
import { AuthService } from '../../services/AuthService';
|
||||
|
||||
export const apiKeyRoutes = (authService: AuthService) => {
|
||||
const router = Router();
|
||||
const controller = new ApiKeyController();
|
||||
|
||||
router.post('/', requireAuth(authService), controller.generateApiKey);
|
||||
router.get('/', requireAuth(authService), controller.getApiKeys);
|
||||
router.delete('/:id', requireAuth(authService), controller.deleteApiKey);
|
||||
|
||||
return router;
|
||||
};
|
||||
@@ -1,20 +1,35 @@
|
||||
import { Router } from 'express';
|
||||
import { ArchivedEmailController } from '../controllers/archived-email.controller';
|
||||
import { requireAuth } from '../middleware/requireAuth';
|
||||
import { IAuthService } from '../../services/AuthService';
|
||||
import { requirePermission } from '../middleware/requirePermission';
|
||||
import { AuthService } from '../../services/AuthService';
|
||||
|
||||
export const createArchivedEmailRouter = (
|
||||
archivedEmailController: ArchivedEmailController,
|
||||
authService: IAuthService
|
||||
archivedEmailController: ArchivedEmailController,
|
||||
authService: AuthService
|
||||
): Router => {
|
||||
const router = Router();
|
||||
const router = Router();
|
||||
|
||||
// Secure all routes in this module
|
||||
router.use(requireAuth(authService));
|
||||
// Secure all routes in this module
|
||||
router.use(requireAuth(authService));
|
||||
|
||||
router.get('/ingestion-source/:ingestionSourceId', archivedEmailController.getArchivedEmails);
|
||||
router.get(
|
||||
'/ingestion-source/:ingestionSourceId',
|
||||
requirePermission('read', 'archive'),
|
||||
archivedEmailController.getArchivedEmails
|
||||
);
|
||||
|
||||
router.get('/:id', archivedEmailController.getArchivedEmailById);
|
||||
router.get(
|
||||
'/:id',
|
||||
requirePermission('read', 'archive'),
|
||||
archivedEmailController.getArchivedEmailById
|
||||
);
|
||||
|
||||
return router;
|
||||
router.delete(
|
||||
'/:id',
|
||||
requirePermission('delete', 'archive'),
|
||||
archivedEmailController.deleteArchivedEmail
|
||||
);
|
||||
|
||||
return router;
|
||||
};
|
||||
|
||||
@@ -2,14 +2,28 @@ import { Router } from 'express';
|
||||
import type { AuthController } from '../controllers/auth.controller';
|
||||
|
||||
export const createAuthRouter = (authController: AuthController): Router => {
|
||||
const router = Router();
|
||||
const router = Router();
|
||||
|
||||
/**
|
||||
* @route POST /api/v1/auth/login
|
||||
* @description Authenticates a user and returns a JWT.
|
||||
* @access Public
|
||||
*/
|
||||
router.post('/login', authController.login);
|
||||
/**
|
||||
* @route POST /api/v1/auth/setup
|
||||
* @description Creates the initial administrator user.
|
||||
* @access Public
|
||||
*/
|
||||
router.post('/setup', authController.setup);
|
||||
|
||||
return router;
|
||||
/**
|
||||
* @route POST /api/v1/auth/login
|
||||
* @description Authenticates a user and returns a JWT.
|
||||
* @access Public
|
||||
*/
|
||||
router.post('/login', authController.login);
|
||||
|
||||
/**
|
||||
* @route GET /api/v1/auth/status
|
||||
* @description Checks if the application has been set up.
|
||||
* @access Public
|
||||
*/
|
||||
router.get('/status', authController.status);
|
||||
|
||||
return router;
|
||||
};
|
||||
|
||||
@@ -1,18 +1,39 @@
|
||||
import { Router } from 'express';
|
||||
import { dashboardController } from '../controllers/dashboard.controller';
|
||||
import { requireAuth } from '../middleware/requireAuth';
|
||||
import { IAuthService } from '../../services/AuthService';
|
||||
import { requirePermission } from '../middleware/requirePermission';
|
||||
import { AuthService } from '../../services/AuthService';
|
||||
|
||||
export const createDashboardRouter = (authService: IAuthService): Router => {
|
||||
const router = Router();
|
||||
export const createDashboardRouter = (authService: AuthService): Router => {
|
||||
const router = Router();
|
||||
|
||||
router.use(requireAuth(authService));
|
||||
router.use(requireAuth(authService));
|
||||
|
||||
router.get('/stats', dashboardController.getStats);
|
||||
router.get('/ingestion-history', dashboardController.getIngestionHistory);
|
||||
router.get('/ingestion-sources', dashboardController.getIngestionSources);
|
||||
router.get('/recent-syncs', dashboardController.getRecentSyncs);
|
||||
router.get('/indexed-insights', dashboardController.getIndexedInsights);
|
||||
router.get(
|
||||
'/stats',
|
||||
requirePermission('read', 'dashboard', 'dashboard.permissionRequired'),
|
||||
dashboardController.getStats
|
||||
);
|
||||
router.get(
|
||||
'/ingestion-history',
|
||||
requirePermission('read', 'dashboard', 'dashboard.permissionRequired'),
|
||||
dashboardController.getIngestionHistory
|
||||
);
|
||||
router.get(
|
||||
'/ingestion-sources',
|
||||
requirePermission('read', 'dashboard', 'dashboard.permissionRequired'),
|
||||
dashboardController.getIngestionSources
|
||||
);
|
||||
router.get(
|
||||
'/recent-syncs',
|
||||
requirePermission('read', 'dashboard', 'dashboard.permissionRequired'),
|
||||
dashboardController.getRecentSyncs
|
||||
);
|
||||
router.get(
|
||||
'/indexed-insights',
|
||||
requirePermission('read', 'dashboard', 'dashboard.permissionRequired'),
|
||||
dashboardController.getIndexedInsights
|
||||
);
|
||||
|
||||
return router;
|
||||
return router;
|
||||
};
|
||||
|
||||
42
packages/backend/src/api/routes/iam.routes.ts
Normal file
42
packages/backend/src/api/routes/iam.routes.ts
Normal file
@@ -0,0 +1,42 @@
|
||||
import { Router } from 'express';
|
||||
import { requireAuth } from '../middleware/requireAuth';
|
||||
import { requirePermission } from '../middleware/requirePermission';
|
||||
import type { IamController } from '../controllers/iam.controller';
|
||||
import type { AuthService } from '../../services/AuthService';
|
||||
|
||||
export const createIamRouter = (iamController: IamController, authService: AuthService): Router => {
|
||||
const router = Router();
|
||||
|
||||
router.use(requireAuth(authService));
|
||||
|
||||
/**
|
||||
* @route GET /api/v1/iam/roles
|
||||
* @description Gets all roles.
|
||||
* @access Private
|
||||
*/
|
||||
router.get('/roles', requirePermission('read', 'roles'), iamController.getRoles);
|
||||
|
||||
router.get('/roles/:id', requirePermission('read', 'roles'), iamController.getRoleById);
|
||||
|
||||
/**
|
||||
* Only super admin has the ability to modify existing roles or create new roles.
|
||||
*/
|
||||
router.post(
|
||||
'/roles',
|
||||
requirePermission('manage', 'all', 'iam.requiresSuperAdminRole'),
|
||||
iamController.createRole
|
||||
);
|
||||
|
||||
router.delete(
|
||||
'/roles/:id',
|
||||
requirePermission('manage', 'all', 'iam.requiresSuperAdminRole'),
|
||||
iamController.deleteRole
|
||||
);
|
||||
|
||||
router.put(
|
||||
'/roles/:id',
|
||||
requirePermission('manage', 'all', 'iam.requiresSuperAdminRole'),
|
||||
iamController.updateRole
|
||||
);
|
||||
return router;
|
||||
};
|
||||
@@ -1,30 +1,41 @@
|
||||
import { Router } from 'express';
|
||||
import { IngestionController } from '../controllers/ingestion.controller';
|
||||
import { requireAuth } from '../middleware/requireAuth';
|
||||
import { IAuthService } from '../../services/AuthService';
|
||||
import { requirePermission } from '../middleware/requirePermission';
|
||||
import { AuthService } from '../../services/AuthService';
|
||||
|
||||
export const createIngestionRouter = (
|
||||
ingestionController: IngestionController,
|
||||
authService: IAuthService
|
||||
ingestionController: IngestionController,
|
||||
authService: AuthService
|
||||
): Router => {
|
||||
const router = Router();
|
||||
const router = Router();
|
||||
|
||||
// Secure all routes in this module
|
||||
router.use(requireAuth(authService));
|
||||
// Secure all routes in this module
|
||||
router.use(requireAuth(authService));
|
||||
|
||||
router.post('/', ingestionController.create);
|
||||
router.post('/', requirePermission('create', 'ingestion'), ingestionController.create);
|
||||
|
||||
router.get('/', ingestionController.findAll);
|
||||
router.get('/', requirePermission('read', 'ingestion'), ingestionController.findAll);
|
||||
|
||||
router.get('/:id', ingestionController.findById);
|
||||
router.get('/:id', requirePermission('read', 'ingestion'), ingestionController.findById);
|
||||
|
||||
router.put('/:id', ingestionController.update);
|
||||
router.put('/:id', requirePermission('update', 'ingestion'), ingestionController.update);
|
||||
|
||||
router.delete('/:id', ingestionController.delete);
|
||||
router.delete('/:id', requirePermission('delete', 'ingestion'), ingestionController.delete);
|
||||
|
||||
router.post('/:id/sync', ingestionController.triggerInitialImport);
|
||||
router.post(
|
||||
'/:id/import',
|
||||
requirePermission('create', 'ingestion'),
|
||||
ingestionController.triggerInitialImport
|
||||
);
|
||||
|
||||
router.post('/:id/pause', ingestionController.pause);
|
||||
router.post('/:id/pause', requirePermission('update', 'ingestion'), ingestionController.pause);
|
||||
|
||||
return router;
|
||||
router.post(
|
||||
'/:id/sync',
|
||||
requirePermission('sync', 'ingestion'),
|
||||
ingestionController.triggerForceSync
|
||||
);
|
||||
|
||||
return router;
|
||||
};
|
||||
|
||||
@@ -1,17 +1,18 @@
|
||||
import { Router } from 'express';
|
||||
import { SearchController } from '../controllers/search.controller';
|
||||
import { requireAuth } from '../middleware/requireAuth';
|
||||
import { IAuthService } from '../../services/AuthService';
|
||||
import { requirePermission } from '../middleware/requirePermission';
|
||||
import { AuthService } from '../../services/AuthService';
|
||||
|
||||
export const createSearchRouter = (
|
||||
searchController: SearchController,
|
||||
authService: IAuthService
|
||||
searchController: SearchController,
|
||||
authService: AuthService
|
||||
): Router => {
|
||||
const router = Router();
|
||||
const router = Router();
|
||||
|
||||
router.use(requireAuth(authService));
|
||||
router.use(requireAuth(authService));
|
||||
|
||||
router.get('/', searchController.search);
|
||||
router.get('/', requirePermission('search', 'archive'), searchController.search);
|
||||
|
||||
return router;
|
||||
return router;
|
||||
};
|
||||
|
||||
25
packages/backend/src/api/routes/settings.routes.ts
Normal file
25
packages/backend/src/api/routes/settings.routes.ts
Normal file
@@ -0,0 +1,25 @@
|
||||
import { Router } from 'express';
|
||||
import * as settingsController from '../controllers/settings.controller';
|
||||
import { requireAuth } from '../middleware/requireAuth';
|
||||
import { requirePermission } from '../middleware/requirePermission';
|
||||
import { AuthService } from '../../services/AuthService';
|
||||
|
||||
export const createSettingsRouter = (authService: AuthService): Router => {
|
||||
const router = Router();
|
||||
|
||||
// Public route to get non-sensitive settings. settings read should not be scoped with a permission because all end users need the settings data in the frontend. However, for sensitive settings data, we need to add a new permission subject to limit access. So this route should only expose non-sensitive settings data.
|
||||
/**
|
||||
* @returns SystemSettings
|
||||
*/
|
||||
router.get('/system', settingsController.getSystemSettings);
|
||||
|
||||
// Protected route to update settings
|
||||
router.put(
|
||||
'/system',
|
||||
requireAuth(authService),
|
||||
requirePermission('manage', 'settings', 'settings.noPermissionToUpdate'),
|
||||
settingsController.updateSystemSettings
|
||||
);
|
||||
|
||||
return router;
|
||||
};
|
||||
@@ -1,18 +1,19 @@
|
||||
import { Router } from 'express';
|
||||
import { StorageController } from '../controllers/storage.controller';
|
||||
import { requireAuth } from '../middleware/requireAuth';
|
||||
import { IAuthService } from '../../services/AuthService';
|
||||
import { requirePermission } from '../middleware/requirePermission';
|
||||
import { AuthService } from '../../services/AuthService';
|
||||
|
||||
export const createStorageRouter = (
|
||||
storageController: StorageController,
|
||||
authService: IAuthService
|
||||
storageController: StorageController,
|
||||
authService: AuthService
|
||||
): Router => {
|
||||
const router = Router();
|
||||
const router = Router();
|
||||
|
||||
// Secure all routes in this module
|
||||
router.use(requireAuth(authService));
|
||||
// Secure all routes in this module
|
||||
router.use(requireAuth(authService));
|
||||
|
||||
router.get('/download', storageController.downloadFile);
|
||||
router.get('/download', requirePermission('read', 'archive'), storageController.downloadFile);
|
||||
|
||||
return router;
|
||||
return router;
|
||||
};
|
||||
|
||||
@@ -1,18 +0,0 @@
|
||||
import { Router } from 'express';
|
||||
import { ingestionQueue } from '../../jobs/queues';
|
||||
|
||||
const router: Router = Router();
|
||||
|
||||
router.post('/trigger-job', async (req, res) => {
|
||||
try {
|
||||
const job = await ingestionQueue.add('initial-import', {
|
||||
ingestionSourceId: 'test-source-id-test-2345'
|
||||
});
|
||||
res.status(202).json({ message: 'Test job triggered successfully', jobId: job.id });
|
||||
} catch (error) {
|
||||
console.error('Failed to trigger test job', error);
|
||||
res.status(500).json({ message: 'Failed to trigger test job' });
|
||||
}
|
||||
});
|
||||
|
||||
export default router;
|
||||
15
packages/backend/src/api/routes/upload.routes.ts
Normal file
15
packages/backend/src/api/routes/upload.routes.ts
Normal file
@@ -0,0 +1,15 @@
|
||||
import { Router } from 'express';
|
||||
import { uploadFile } from '../controllers/upload.controller';
|
||||
import { requireAuth } from '../middleware/requireAuth';
|
||||
import { AuthService } from '../../services/AuthService';
|
||||
import { requirePermission } from '../middleware/requirePermission';
|
||||
|
||||
export const createUploadRouter = (authService: AuthService): Router => {
|
||||
const router = Router();
|
||||
|
||||
router.use(requireAuth(authService));
|
||||
|
||||
router.post('/', requirePermission('create', 'ingestion'), uploadFile);
|
||||
|
||||
return router;
|
||||
};
|
||||
38
packages/backend/src/api/routes/user.routes.ts
Normal file
38
packages/backend/src/api/routes/user.routes.ts
Normal file
@@ -0,0 +1,38 @@
|
||||
import { Router } from 'express';
|
||||
import * as userController from '../controllers/user.controller';
|
||||
import { requireAuth } from '../middleware/requireAuth';
|
||||
import { requirePermission } from '../middleware/requirePermission';
|
||||
import { AuthService } from '../../services/AuthService';
|
||||
|
||||
export const createUserRouter = (authService: AuthService): Router => {
|
||||
const router = Router();
|
||||
|
||||
router.use(requireAuth(authService));
|
||||
|
||||
router.get('/', requirePermission('read', 'users'), userController.getUsers);
|
||||
|
||||
router.get('/:id', requirePermission('read', 'users'), userController.getUser);
|
||||
|
||||
/**
|
||||
* Only super admin has the ability to modify existing users or create new users.
|
||||
*/
|
||||
router.post(
|
||||
'/',
|
||||
requirePermission('manage', 'all', 'user.requiresSuperAdminRole'),
|
||||
userController.createUser
|
||||
);
|
||||
|
||||
router.put(
|
||||
'/:id',
|
||||
requirePermission('manage', 'all', 'user.requiresSuperAdminRole'),
|
||||
userController.updateUser
|
||||
);
|
||||
|
||||
router.delete(
|
||||
'/:id',
|
||||
requirePermission('manage', 'all', 'user.requiresSuperAdminRole'),
|
||||
userController.deleteUser
|
||||
);
|
||||
|
||||
return router;
|
||||
};
|
||||
12
packages/backend/src/config/api.ts
Normal file
12
packages/backend/src/config/api.ts
Normal file
@@ -0,0 +1,12 @@
|
||||
import 'dotenv/config';
|
||||
|
||||
export const apiConfig = {
|
||||
rateLimit: {
|
||||
windowMs: process.env.RATE_LIMIT_WINDOW_MS
|
||||
? parseInt(process.env.RATE_LIMIT_WINDOW_MS, 10)
|
||||
: 1 * 60 * 1000, // 1 minutes
|
||||
max: process.env.RATE_LIMIT_MAX_REQUESTS
|
||||
? parseInt(process.env.RATE_LIMIT_MAX_REQUESTS, 10)
|
||||
: 100, // limit each IP to 100 requests per windowMs
|
||||
},
|
||||
};
|
||||
@@ -1,7 +1,9 @@
|
||||
import 'dotenv/config';
|
||||
|
||||
export const app = {
|
||||
nodeEnv: process.env.NODE_ENV || 'development',
|
||||
port: process.env.PORT_BACKEND ? parseInt(process.env.PORT_BACKEND, 10) : 4000,
|
||||
encryptionKey: process.env.ENCRYPTION_KEY,
|
||||
nodeEnv: process.env.NODE_ENV || 'development',
|
||||
port: process.env.PORT_BACKEND ? parseInt(process.env.PORT_BACKEND, 10) : 4000,
|
||||
encryptionKey: process.env.ENCRYPTION_KEY,
|
||||
isDemo: process.env.IS_DEMO === 'true',
|
||||
syncFrequency: process.env.SYNC_FREQUENCY || '* * * * *', //default to 1 minute
|
||||
};
|
||||
|
||||
@@ -2,10 +2,12 @@ import { storage } from './storage';
|
||||
import { app } from './app';
|
||||
import { searchConfig } from './search';
|
||||
import { connection as redisConfig } from './redis';
|
||||
import { apiConfig } from './api';
|
||||
|
||||
export const config = {
|
||||
storage,
|
||||
app,
|
||||
search: searchConfig,
|
||||
redis: redisConfig,
|
||||
storage,
|
||||
app,
|
||||
search: searchConfig,
|
||||
redis: redisConfig,
|
||||
api: apiConfig,
|
||||
};
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
import pino from 'pino';
|
||||
|
||||
export const logger = pino({
|
||||
level: process.env.LOG_LEVEL || 'info',
|
||||
transport: {
|
||||
target: 'pino-pretty',
|
||||
options: {
|
||||
colorize: true
|
||||
}
|
||||
}
|
||||
level: process.env.LOG_LEVEL || 'info',
|
||||
transport: {
|
||||
target: 'pino-pretty',
|
||||
options: {
|
||||
colorize: true,
|
||||
},
|
||||
},
|
||||
});
|
||||
|
||||
@@ -3,12 +3,17 @@ import 'dotenv/config';
|
||||
/**
|
||||
* @see https://github.com/taskforcesh/bullmq/blob/master/docs/gitbook/guide/connections.md
|
||||
*/
|
||||
export const connection = {
|
||||
host: process.env.REDIS_HOST || 'localhost',
|
||||
port: (process.env.REDIS_PORT && parseInt(process.env.REDIS_PORT, 10)) || 6379,
|
||||
password: process.env.REDIS_PASSWORD,
|
||||
maxRetriesPerRequest: null,
|
||||
tls: {
|
||||
rejectUnauthorized: false
|
||||
}
|
||||
const connectionOptions: any = {
|
||||
host: process.env.REDIS_HOST || 'localhost',
|
||||
port: (process.env.REDIS_PORT && parseInt(process.env.REDIS_PORT, 10)) || 6379,
|
||||
password: process.env.REDIS_PASSWORD,
|
||||
enableReadyCheck: true,
|
||||
};
|
||||
|
||||
if (process.env.REDIS_TLS_ENABLED === 'true') {
|
||||
connectionOptions.tls = {
|
||||
rejectUnauthorized: false,
|
||||
};
|
||||
}
|
||||
|
||||
export const connection = connectionOptions;
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import 'dotenv/config';
|
||||
|
||||
export const searchConfig = {
|
||||
host: process.env.MEILI_HOST || 'http://127.0.0.1:7700',
|
||||
apiKey: process.env.MEILI_MASTER_KEY || '',
|
||||
host: process.env.MEILI_HOST || 'http://127.0.0.1:7700',
|
||||
apiKey: process.env.MEILI_MASTER_KEY || '',
|
||||
};
|
||||
|
||||
@@ -2,37 +2,39 @@ import { StorageConfig } from '@open-archiver/types';
|
||||
import 'dotenv/config';
|
||||
|
||||
const storageType = process.env.STORAGE_TYPE;
|
||||
|
||||
const openArchiverFolderName = 'open-archiver';
|
||||
let storageConfig: StorageConfig;
|
||||
|
||||
if (storageType === 'local') {
|
||||
if (!process.env.STORAGE_LOCAL_ROOT_PATH) {
|
||||
throw new Error('STORAGE_LOCAL_ROOT_PATH is not defined in the environment variables');
|
||||
}
|
||||
storageConfig = {
|
||||
type: 'local',
|
||||
rootPath: process.env.STORAGE_LOCAL_ROOT_PATH,
|
||||
};
|
||||
if (!process.env.STORAGE_LOCAL_ROOT_PATH) {
|
||||
throw new Error('STORAGE_LOCAL_ROOT_PATH is not defined in the environment variables');
|
||||
}
|
||||
storageConfig = {
|
||||
type: 'local',
|
||||
rootPath: process.env.STORAGE_LOCAL_ROOT_PATH,
|
||||
openArchiverFolderName: openArchiverFolderName,
|
||||
};
|
||||
} else if (storageType === 's3') {
|
||||
if (
|
||||
!process.env.STORAGE_S3_ENDPOINT ||
|
||||
!process.env.STORAGE_S3_BUCKET ||
|
||||
!process.env.STORAGE_S3_ACCESS_KEY_ID ||
|
||||
!process.env.STORAGE_S3_SECRET_ACCESS_KEY
|
||||
) {
|
||||
throw new Error('One or more S3 storage environment variables are not defined');
|
||||
}
|
||||
storageConfig = {
|
||||
type: 's3',
|
||||
endpoint: process.env.STORAGE_S3_ENDPOINT,
|
||||
bucket: process.env.STORAGE_S3_BUCKET,
|
||||
accessKeyId: process.env.STORAGE_S3_ACCESS_KEY_ID,
|
||||
secretAccessKey: process.env.STORAGE_S3_SECRET_ACCESS_KEY,
|
||||
region: process.env.STORAGE_S3_REGION,
|
||||
forcePathStyle: process.env.STORAGE_S3_FORCE_PATH_STYLE === 'true',
|
||||
};
|
||||
if (
|
||||
!process.env.STORAGE_S3_ENDPOINT ||
|
||||
!process.env.STORAGE_S3_BUCKET ||
|
||||
!process.env.STORAGE_S3_ACCESS_KEY_ID ||
|
||||
!process.env.STORAGE_S3_SECRET_ACCESS_KEY
|
||||
) {
|
||||
throw new Error('One or more S3 storage environment variables are not defined');
|
||||
}
|
||||
storageConfig = {
|
||||
type: 's3',
|
||||
endpoint: process.env.STORAGE_S3_ENDPOINT,
|
||||
bucket: process.env.STORAGE_S3_BUCKET,
|
||||
accessKeyId: process.env.STORAGE_S3_ACCESS_KEY_ID,
|
||||
secretAccessKey: process.env.STORAGE_S3_SECRET_ACCESS_KEY,
|
||||
region: process.env.STORAGE_S3_REGION,
|
||||
forcePathStyle: process.env.STORAGE_S3_FORCE_PATH_STYLE === 'true',
|
||||
openArchiverFolderName: openArchiverFolderName,
|
||||
};
|
||||
} else {
|
||||
throw new Error(`Invalid STORAGE_TYPE: ${storageType}`);
|
||||
throw new Error(`Invalid STORAGE_TYPE: ${storageType}`);
|
||||
}
|
||||
|
||||
export const storage = storageConfig;
|
||||
|
||||
@@ -3,10 +3,12 @@ import postgres from 'postgres';
|
||||
import 'dotenv/config';
|
||||
|
||||
import * as schema from './schema';
|
||||
import { encodeDatabaseUrl } from '../helpers/db';
|
||||
|
||||
if (!process.env.DATABASE_URL) {
|
||||
throw new Error('DATABASE_URL is not set in the .env file');
|
||||
throw new Error('DATABASE_URL is not set in the .env file');
|
||||
}
|
||||
|
||||
const client = postgres(process.env.DATABASE_URL);
|
||||
const connectionString = encodeDatabaseUrl(process.env.DATABASE_URL);
|
||||
const client = postgres(connectionString);
|
||||
export const db = drizzle(client, { schema });
|
||||
|
||||
@@ -2,26 +2,28 @@ import { migrate } from 'drizzle-orm/postgres-js/migrator';
|
||||
import { drizzle } from 'drizzle-orm/postgres-js';
|
||||
import postgres from 'postgres';
|
||||
import { config } from 'dotenv';
|
||||
import { encodeDatabaseUrl } from '../helpers/db';
|
||||
|
||||
config();
|
||||
|
||||
const runMigrate = async () => {
|
||||
if (!process.env.DATABASE_URL) {
|
||||
throw new Error('DATABASE_URL is not set in the .env file');
|
||||
}
|
||||
if (!process.env.DATABASE_URL) {
|
||||
throw new Error('DATABASE_URL is not set in the .env file');
|
||||
}
|
||||
|
||||
const connection = postgres(process.env.DATABASE_URL, { max: 1 });
|
||||
const db = drizzle(connection);
|
||||
const connectionString = encodeDatabaseUrl(process.env.DATABASE_URL);
|
||||
const connection = postgres(connectionString, { max: 1 });
|
||||
const db = drizzle(connection);
|
||||
|
||||
console.log('Running migrations...');
|
||||
console.log('Running migrations...');
|
||||
|
||||
await migrate(db, { migrationsFolder: 'src/database/migrations' });
|
||||
await migrate(db, { migrationsFolder: 'src/database/migrations' });
|
||||
|
||||
console.log('Migrations completed!');
|
||||
process.exit(0);
|
||||
console.log('Migrations completed!');
|
||||
process.exit(0);
|
||||
};
|
||||
|
||||
runMigrate().catch((err) => {
|
||||
console.error('Migration failed!', err);
|
||||
process.exit(1);
|
||||
console.error('Migration failed!', err);
|
||||
process.exit(1);
|
||||
});
|
||||
|
||||
@@ -0,0 +1,2 @@
|
||||
ALTER TABLE "archived_emails" ADD COLUMN "thread_id" text;--> statement-breakpoint
|
||||
CREATE INDEX "thread_id_idx" ON "archived_emails" USING btree ("thread_id");
|
||||
@@ -0,0 +1,36 @@
|
||||
CREATE TABLE "roles" (
|
||||
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
|
||||
"name" text NOT NULL,
|
||||
"policies" jsonb DEFAULT '[]'::jsonb NOT NULL,
|
||||
"created_at" timestamp DEFAULT now() NOT NULL,
|
||||
"updated_at" timestamp DEFAULT now() NOT NULL,
|
||||
CONSTRAINT "roles_name_unique" UNIQUE("name")
|
||||
);
|
||||
--> statement-breakpoint
|
||||
CREATE TABLE "sessions" (
|
||||
"id" text PRIMARY KEY NOT NULL,
|
||||
"user_id" uuid NOT NULL,
|
||||
"expires_at" timestamp with time zone NOT NULL
|
||||
);
|
||||
--> statement-breakpoint
|
||||
CREATE TABLE "user_roles" (
|
||||
"user_id" uuid NOT NULL,
|
||||
"role_id" uuid NOT NULL,
|
||||
CONSTRAINT "user_roles_user_id_role_id_pk" PRIMARY KEY("user_id","role_id")
|
||||
);
|
||||
--> statement-breakpoint
|
||||
CREATE TABLE "users" (
|
||||
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
|
||||
"email" text NOT NULL,
|
||||
"name" text,
|
||||
"password" text,
|
||||
"provider" text DEFAULT 'local',
|
||||
"provider_id" text,
|
||||
"created_at" timestamp DEFAULT now() NOT NULL,
|
||||
"updated_at" timestamp DEFAULT now() NOT NULL,
|
||||
CONSTRAINT "users_email_unique" UNIQUE("email")
|
||||
);
|
||||
--> statement-breakpoint
|
||||
ALTER TABLE "sessions" ADD CONSTRAINT "sessions_user_id_users_id_fk" FOREIGN KEY ("user_id") REFERENCES "public"."users"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
|
||||
ALTER TABLE "user_roles" ADD CONSTRAINT "user_roles_user_id_users_id_fk" FOREIGN KEY ("user_id") REFERENCES "public"."users"("id") ON DELETE cascade ON UPDATE no action;--> statement-breakpoint
|
||||
ALTER TABLE "user_roles" ADD CONSTRAINT "user_roles_role_id_roles_id_fk" FOREIGN KEY ("role_id") REFERENCES "public"."roles"("id") ON DELETE cascade ON UPDATE no action;
|
||||
@@ -0,0 +1,2 @@
|
||||
ALTER TABLE "users" RENAME COLUMN "name" TO "first_name";--> statement-breakpoint
|
||||
ALTER TABLE "users" ADD COLUMN "last_name" text;
|
||||
@@ -0,0 +1,2 @@
|
||||
ALTER TYPE "public"."ingestion_provider" ADD VALUE 'pst_import';--> statement-breakpoint
|
||||
ALTER TYPE "public"."ingestion_status" ADD VALUE 'imported';
|
||||
@@ -0,0 +1,2 @@
|
||||
ALTER TABLE "archived_emails" ADD COLUMN "path" text;--> statement-breakpoint
|
||||
ALTER TABLE "archived_emails" ADD COLUMN "tags" jsonb;
|
||||
@@ -0,0 +1 @@
|
||||
ALTER TYPE "public"."ingestion_provider" ADD VALUE 'eml_import';
|
||||
@@ -0,0 +1,2 @@
|
||||
ALTER TABLE "ingestion_sources" ADD COLUMN "user_id" uuid;--> statement-breakpoint
|
||||
ALTER TABLE "ingestion_sources" ADD CONSTRAINT "ingestion_sources_user_id_users_id_fk" FOREIGN KEY ("user_id") REFERENCES "public"."users"("id") ON DELETE cascade ON UPDATE no action;
|
||||
@@ -0,0 +1,2 @@
|
||||
ALTER TABLE "roles" ADD COLUMN "slug" text;--> statement-breakpoint
|
||||
ALTER TABLE "roles" ADD CONSTRAINT "roles_slug_unique" UNIQUE("slug");
|
||||
@@ -0,0 +1,4 @@
|
||||
CREATE TABLE "system_settings" (
|
||||
"id" serial PRIMARY KEY NOT NULL,
|
||||
"config" jsonb NOT NULL
|
||||
);
|
||||
@@ -0,0 +1,11 @@
|
||||
CREATE TABLE "api_keys" (
|
||||
"id" uuid PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL,
|
||||
"name" text NOT NULL,
|
||||
"user_id" uuid NOT NULL,
|
||||
"key" text NOT NULL,
|
||||
"expires_at" timestamp with time zone NOT NULL,
|
||||
"created_at" timestamp DEFAULT now() NOT NULL,
|
||||
"updated_at" timestamp DEFAULT now() NOT NULL
|
||||
);
|
||||
--> statement-breakpoint
|
||||
ALTER TABLE "api_keys" ADD CONSTRAINT "api_keys_user_id_users_id_fk" FOREIGN KEY ("user_id") REFERENCES "public"."users"("id") ON DELETE cascade ON UPDATE no action;
|
||||
@@ -0,0 +1 @@
|
||||
ALTER TABLE "api_keys" ADD COLUMN "key_hash" text NOT NULL;
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user