Add Worker service configuration, deployment setup, and Nginx routing

Simon Larsen
2024-09-16 08:42:50 -07:00
parent 5a31785895
commit 86168a50ee
10 changed files with 169 additions and 1 deletion

.vscode/launch.json
View File

@@ -77,6 +77,20 @@
"restart": true,
"autoAttachChildProcesses": true
},
{
"address": "127.0.0.1",
"localRoot": "${workspaceFolder}/Worker",
"name": "Worker: Debug with Docker",
"port": 8734,
"remoteRoot": "/usr/src/app",
"request": "attach",
"skipFiles": [
"<node_internals>/**"
],
"type": "node",
"restart": true,
"autoAttachChildProcesses": true
},
{
"address": "127.0.0.1",
"localRoot": "${workspaceFolder}/TestServer",

View File

@@ -100,6 +100,12 @@ export const IsolatedVMHostname: Hostname = Hostname.fromString(
}`,
);
export const WorkerHostname: Hostname = Hostname.fromString(
`${process.env["SERVER_WORKER_HOSTNAME"] || "localhost"}:${
process.env["WORKER_PORT"] || 80
}`,
);
export const HomeHostname: Hostname = Hostname.fromString(
`${process.env["SERVER_HOME_HOSTNAME"] || "localhost"}:${
process.env["HOME_PORT"] || 80

View File

@@ -48,6 +48,8 @@ Usage:
value: {{ $.Release.Name }}-accounts.{{ $.Release.Namespace }}.svc.{{ $.Values.global.clusterDomain }}
- name: SERVER_ISOLATED_VM_HOSTNAME
value: {{ $.Release.Name }}-isolated-vm.{{ $.Release.Namespace }}.svc.{{ $.Values.global.clusterDomain }}
- name: SERVER_WORKER_HOSTNAME
value: {{ $.Release.Name }}-worker.{{ $.Release.Namespace }}.svc.{{ $.Values.global.clusterDomain }}
- name: SERVER_HOME_HOSTNAME
value: {{ $.Release.Name }}-home.{{ $.Release.Namespace }}.svc.{{ $.Values.global.clusterDomain }}
- name: SERVER_APP_HOSTNAME
@@ -79,6 +81,8 @@ Usage:
value: {{ $.Values.port.isolatedVM | squote }}
- name: HOME_PORT
value: {{ $.Values.port.home | squote }}
- name: WORKER_PORT
value: {{ $.Values.port.worker | squote }}
- name: STATUS_PAGE_PORT
value: {{ $.Values.port.statusPage | squote }}
- name: DASHBOARD_PORT

View File

@@ -0,0 +1,85 @@
# OneUptime worker Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ printf "%s-%s" $.Release.Name "worker" }}
namespace: {{ $.Release.Namespace }}
labels:
app: {{ printf "%s-%s" $.Release.Name "worker" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
appname: oneuptime
date: "{{ now | unixEpoch }}"
spec:
selector:
matchLabels:
app: {{ printf "%s-%s" $.Release.Name "worker" }}
replicas: {{ $.Values.deployment.replicaCount }}
template:
metadata:
labels:
app: {{ printf "%s-%s" $.Release.Name "worker" }}
date: "{{ now | unixEpoch }}"
appname: oneuptime
spec:
{{- if $.Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml $.Values.imagePullSecrets | nindent 8 }}
{{- end }}
{{- if $.Values.podSecurityContext }}
securityContext: {{- $.Values.podSecurityContext | toYaml | nindent 8 }}
{{- end }}
{{- if $.Values.affinity }}
affinity: {{- $.Values.affinity | toYaml | nindent 8 }}
{{- end }}
{{- if $.Values.tolerations }}
tolerations: {{- $.Values.tolerations | toYaml | nindent 8 }}
{{- end }}
{{- if $.Values.nodeSelector }}
nodeSelector: {{- $.Values.nodeSelector | toYaml | nindent 8 }}
{{- end }}
containers:
- image: {{ printf "%s/%s/%s:%s" $.Values.image.registry $.Values.image.repository "worker" $.Values.image.tag }}
name: {{ printf "%s-%s" $.Release.Name "worker" }}
# Liveness probe
livenessProbe:
httpGet:
path: /status/live
port: {{ $.Values.port.worker }}
initialDelaySeconds: 300
periodSeconds: 10
timeoutSeconds: 30
# Readiness probe
readinessProbe:
httpGet:
path: /status/ready
port: {{ $.Values.port.worker }}
initialDelaySeconds: 300
periodSeconds: 10
timeoutSeconds: 30
{{- if $.Values.containerSecurityContext }}
securityContext: {{- $.Values.containerSecurityContext | toYaml | nindent 12 }}
{{- end }}
imagePullPolicy: {{ $.Values.image.pullPolicy }}
env:
{{- include "oneuptime.env.common" . | nindent 12 }}
{{- include "oneuptime.env.commonServer" . | nindent 12 }}
{{- include "oneuptime.env.oneuptimeSecret" . | nindent 12 }}
ports:
- containerPort: {{ $.Values.port.worker }}
protocol: TCP
name: http
restartPolicy: {{ $.Values.image.restartPolicy }}
---
# OneUptime worker Service
{{- $workerPorts := dict "port" $.Values.port.worker -}}
{{- $workerServiceArgs := dict "ServiceName" "worker" "Ports" $workerPorts "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.service" $workerServiceArgs }}
---
# OneUptime worker autoscaler
{{- $workerAutoScalerArgs := dict "ServiceName" "worker" "Release" $.Release "Values" $.Values -}}
{{- include "oneuptime.autoscaler" $workerAutoScalerArgs }}
---

View File

@@ -208,6 +208,7 @@ port:
otelCollectorHttp: 4318
isolatedVM: 4572
home: 1444
worker: 1445
testServer:

View File

@@ -22,6 +22,10 @@ upstream isolated-vm {
server ${SERVER_ISOLATED_VM_HOSTNAME}:${ISOLATED_VM_PORT} weight=10 max_fails=3 fail_timeout=30s;
}
upstream worker {
server ${SERVER_WORKER_HOSTNAME}:${WORKER_PORT} weight=10 max_fails=3 fail_timeout=30s;
}
upstream home {
server ${SERVER_HOME_HOSTNAME}:${HOME_PORT} weight=10 max_fails=3 fail_timeout=30s;
}
@@ -597,6 +601,21 @@ server {
proxy_pass http://isolated-vm;
}
location /worker {
# This is for nginx not to crash when service is not available.
resolver 127.0.0.1 valid=30s;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# enable WebSockets (for ws://sockjs not connected error in the accounts source: https://stackoverflow.com/questions/41381444/websocket-connection-failed-error-during-websocket-handshake-unexpected-respon)
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_pass http://worker;
}
location /status-page {
# This is for nginx not to crash when service is not available.
resolver 127.0.0.1 valid=30s;
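
Once the stack is up, a quick sanity check of the new /worker route through Nginx (a hedged sketch; which paths the Worker actually serves is not part of this diff, and localhost:80 assumes the default ingress port):

// Hypothetical smoke test, not part of the commit.
// proxy_pass http://worker keeps the /worker prefix, so a non-502 response means
// Nginx resolved the worker upstream, whatever status the Worker itself returns.
fetch("http://localhost/worker")
  .then((response: Response) => {
    console.log(response.status);
  })
  .catch((err: unknown) => {
    console.error("Nginx or the worker upstream is unreachable:", err);
  });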

View File

@@ -115,7 +115,8 @@ DASHBOARD_PORT=3009
ADMIN_DASHBOARD_PORT=3158
OTEL_COLLECTOR_HTTP_PORT=4318
ISOLATED_VM_PORT=4572
HOME_PORT=1444
WORKER_PORT=1445
# If USE_INTERNAL_SMTP is true then you need to fill these values.
INTERNAL_SMTP_FROM_NAME=OneUptime

View File

@@ -34,6 +34,7 @@ x-common-variables: &common-variables
SERVER_ADMIN_DASHBOARD_HOSTNAME: admin-dashboard
SERVER_OTEL_COLLECTOR_HOSTNAME: otel-collector
SERVER_ISOLATED_VM_HOSTNAME: isolated-vm
SERVER_WORKER_HOSTNAME: worker
SERVER_HOME_HOSTNAME: home
#Ports. Usually they don't need to change.
@@ -47,6 +48,7 @@ x-common-variables: &common-variables
DASHBOARD_PORT: ${DASHBOARD_PORT}
ADMIN_DASHBOARD_PORT: ${ADMIN_DASHBOARD_PORT}
ISOLATED_VM_PORT: ${ISOLATED_VM_PORT}
WORKER_PORT: ${WORKER_PORT}
OPENTELEMETRY_EXPORTER_OTLP_ENDPOINT: ${OPENTELEMETRY_EXPORTER_OTLP_ENDPOINT}
OPENTELEMETRY_EXPORTER_OTLP_HEADERS: ${OPENTELEMETRY_EXPORTER_OTLP_HEADERS}
@@ -288,6 +290,18 @@ services:
options:
max-size: "1000m"
worker:
networks:
- oneuptime
restart: always
environment:
<<: *common-server-variables
PORT: ${WORKER_PORT}
logging:
driver: "local"
options:
max-size: "1000m"
probe-1:
networks:
- oneuptime
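
The new worker service only receives PORT: ${WORKER_PORT} from the shared environment block; how the Worker process consumes it is not shown in this commit. A hedged sketch of what the entrypoint presumably looks like (Express and the route handlers are assumptions, chosen only to match the liveness/readiness probe paths in the Helm Deployment above):

// Illustrative sketch only; the real Worker entrypoint is not part of this diff.
import express from "express";

const app = express();

// PORT is injected by docker-compose.base.yml (and via WORKER_PORT in the Helm chart).
const port: number = Number(process.env["PORT"] || 1445);

// Paths match the livenessProbe and readinessProbe in the worker Deployment template.
app.get("/status/live", (_req, res) => {
  res.sendStatus(200);
});

app.get("/status/ready", (_req, res) => {
  res.sendStatus(200);
});

app.listen(port);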

View File

@@ -178,6 +178,24 @@ services:
context: .
dockerfile: ./Home/Dockerfile
worker:
volumes:
- ./Worker:/usr/src/app
# Use node modules of the container and not host system.
# https://stackoverflow.com/questions/29181032/add-a-volume-to-docker-but-exclude-a-sub-folder
- /usr/src/app/node_modules/
- ./Common:/usr/src/Common
- /usr/src/Common/node_modules/
extends:
file: ./docker-compose.base.yml
service: worker
ports:
- '8734:9229' # Debugging port.
build:
network: host
context: .
dockerfile: ./Worker/Dockerfile
app:
volumes:
- ./App:/usr/src/app

View File

@@ -64,6 +64,12 @@ services:
extends:
file: ./docker-compose.base.yml
service: app
worker:
image: oneuptime/worker:${APP_TAG}
extends:
file: ./docker-compose.base.yml
service: worker
home:
image: oneuptime/home:${APP_TAG}