update helm for oneuptime

This commit is contained in:
deityhub
2021-11-23 00:10:19 +01:00
parent 8d4cd95b06
commit dbc0412079
49 changed files with 3322 additions and 35 deletions

View File

@@ -16,7 +16,7 @@ RUN npm ci --only=production
COPY . /usr/src/app
# Expose ports.
# - 3423: Fyipe Helm Chart Server
# - 3423: OneUptime Helm Chart Server
EXPOSE 3423
#Run the app

View File

@@ -1,5 +1,5 @@
#
# Fyipe Docs Dockerfile
# OneUptime Docs Dockerfile
#
# Pull base image nodejs image.
@@ -22,7 +22,7 @@ COPY ./package-lock.json /usr/src/app/package-lock.json
RUN npm ci
# Expose ports.
# - 3423: Fyipe Helm Chart Server
# - 3423: OneUptime Helm Chart Server
EXPOSE 3423
# Expose Debugger port

View File

@@ -1,3 +1,3 @@
# Helm Chart for Fyipe
# Helm Chart for OneUptime
This project contains helm chart for fyipe.
This project contains helm chart for oneuptime.

View File

@@ -1,7 +1,7 @@
{
"name": "helm-chart",
"version": "3.0.0",
"description": "Helm Chart Server for Fyipe",
"description": "Helm Chart Server for OneUptime",
"main": "server.js",
"scripts": {
"preinstall": "npx npm-force-resolutions || echo 'No package-lock.json file. Skipping force resolutions'",

View File

@@ -1,4 +1,4 @@
# This document is for launching Fyipe VM's on Azure, GCP and AWS.
# This document is for launching OneUptime VM's on Azure, GCP and AWS.
# Follow the installation steps below.
# Azure: https://docs.microsoft.com/en-us/azure/marketplace/partner-center-portal/azure-vm-create-offer#technical-configuration
@@ -7,16 +7,16 @@
# Add these lines to: sudo crontab -e
# Run this on Reboot.
@reboot sudo curl https://fyipe.com/chart/install.sh | sudo bash -s thirdPartyBillingEnabled
@reboot sudo curl https://oneuptime.com/chart/install.sh | sudo bash -s thirdPartyBillingEnabled
# Run this once every 24 hours.
0 0 * * * sudo curl https://fyipe.com/chart/install.sh | sudo bash -s thirdPartyBillingEnabled
0 0 * * * sudo curl https://oneuptime.com/chart/install.sh | sudo bash -s thirdPartyBillingEnabled
## FOR AWS EC2
# Run this on Reboot.
@reboot sudo curl https://fyipe.com/chart/install.sh | sudo bash -s thirdPartyBillingEnabled aws-ec2
@reboot sudo curl https://oneuptime.com/chart/install.sh | sudo bash -s thirdPartyBillingEnabled aws-ec2
# Run this once every 24 hours.
0 0 * * * sudo curl https://fyipe.com/chart/install.sh | sudo bash -s thirdPartyBillingEnabled aws-ec2
0 0 * * * sudo curl https://oneuptime.com/chart/install.sh | sudo bash -s thirdPartyBillingEnabled aws-ec2

View File

@@ -1,6 +1,6 @@
apiVersion: v1
entries:
Fyipe:
OneUptime:
- apiVersion: v2
created: "2021-09-10T14:13:26.551683+01:00"
dependencies:
@@ -15,8 +15,8 @@ entries:
version: 14.8.8
description: One complete SRE and DevOps platform.
digest: 84b0d954025bc9d98419d3f7d4ee9e4470db717297e4fc1aaa546af75b16a96a
home: https://fyipe.com
icon: https://fyipe.com/img/Fyipe.svg
home: https://oneuptime.com
icon: https://oneuptime.com/img/Fyipe.svg
keywords:
- DevOps
- IT DevOps
@@ -32,11 +32,11 @@ entries:
- Website Test
- SRE
maintainers:
- email: support@fyipe.com
name: Fyipe Support
url: https://fyipe.com/support
name: Fyipe
- email: support@oneuptime.com
name: OneUptime Support
url: https://oneuptime.com/support
name: OneUptime
urls:
- Fyipe-3.0.0.tgz
- OneUptime-3.0.0.tgz
version: 3.0.0
generated: "2021-09-10T14:13:26.53643+01:00"

View File

@@ -1,6 +1,6 @@
# Important:
# This script will setup MicroK8s and install Fyipe on it.
# This is used to install Fyipe on a standalone VM
# This script will setup MicroK8s and install OneUptime on it.
# This is used to install OneUptime on a standalone VM
# This is usally used for CI/CD testing, and to update VM's on GCP, Azure and AWS.
# If this is the first install, then helm wont be found.
@@ -50,7 +50,7 @@ if [[ ! -n $DKIM_PRIVATE_KEY ]]; then
openssl rsa -in private -out public -pubout
# value of DKIM dns record
echo "DKIM DNS TXT Record"
echo "DNS Selector: fyipe._domainkey"
echo "DNS Selector: oneuptime._domainkey"
echo "DNS Value: v=DKIM1;p=$(grep -v '^-' public | tr -d '\n')"
DKIM_PRIVATE_KEY=$(cat private | base64)
fi
@@ -143,7 +143,7 @@ then
fi
AVAILABLE_VERSION=$(curl https://fyipe.com/api/version | jq '.server' | tr -d '"')
AVAILABLE_VERSION=$(curl https://oneuptime.com/api/version | jq '.server' | tr -d '"')
AVAILABLE_VERSION_BUILD=$(echo $AVAILABLE_VERSION | tr "." "0")
IMAGE_VERSION=$(sudo k get deployment fi-accounts -o=jsonpath='{$.spec.template.spec.containers[:1].image}' || echo 0)
@@ -165,13 +165,13 @@ then
fi
# Install cluster with Helm.
sudo helm repo add fyipe https://fyipe.com/chart || echo "Fyipe already added"
sudo helm repo add oneuptime https://oneuptime.com/chart || echo "OneUptime already added"
sudo helm repo update
function updateinstallation {
sudo k delete job fyipe-init-script || echo "init-script already deleted"
sudo helm upgrade --reuse-values fi fyipe/Fyipe \
sudo k delete job oneuptime-init-script || echo "init-script already deleted"
sudo helm upgrade --reuse-values fi oneuptime/OneUptime \
--set image.tag=$AVAILABLE_VERSION
}
@@ -188,18 +188,18 @@ then
# Chart not deployed. Create a new deployment. Set service of type nodeport for VM's.
# Add Admin Email and Password on AWS.
sudo helm install fi fyipe/Fyipe \
sudo helm install fi oneuptime/OneUptime \
--set isThirdPartyBilling=true \
--set nginx-ingress-controller.service.type=NodePort \
--set nginx-ingress-controller.hostNetwork=true \
--set image.tag=$AVAILABLE_VERSION \
--set fyipe.admin.email=admin@admin.com \
--set oneuptime.admin.email=admin@admin.com \
--set disableSignup=true \
--set fyipe.admin.password=$INSTANCEID
--set oneuptime.admin.password=$INSTANCEID
else
# Chart not deployed. Create a new deployment. Set service of type nodeport for VM's. This is used for Azure and AWS.
sudo helm install fi fyipe/Fyipe \
sudo helm install fi oneuptime/OneUptime \
--set isThirdPartyBilling=true \
--set nginx-ingress-controller.service.type=NodePort \
--set nginx-ingress-controller.hostNetwork=true \
@@ -215,27 +215,27 @@ then
# install services.
if [[ "$2" == "enterprise" ]]
then
sudo helm install -f ./kubernetes/values-enterprise-ci.yaml fi ./helm-chart/public/fyipe \
sudo helm install -f ./kubernetes/values-enterprise-ci.yaml fi ./helm-chart/public/oneuptime \
--set haraka.domain=$DOMAIN \
--set haraka.dkimPrivateKey=$DKIM_PRIVATE_KEY \
--set haraka.tlsCert=$TLS_CERT \
--set haraka.tlsKey=$TLS_KEY
else
sudo helm install -f ./kubernetes/values-saas-ci.yaml fi ./helm-chart/public/fyipe \
sudo helm install -f ./kubernetes/values-saas-ci.yaml fi ./helm-chart/public/oneuptime \
--set haraka.domain=$DOMAIN \
--set haraka.dkimPrivateKey=$DKIM_PRIVATE_KEY \
--set haraka.tlsCert=$TLS_CERT \
--set haraka.tlsKey=$TLS_KEY
fi
else
sudo k delete job fyipe-init-script || echo "init-script already deleted"
sudo helm upgrade --reuse-values fi ./helm-chart/public/fyipe
sudo k delete job oneuptime-init-script || echo "init-script already deleted"
sudo helm upgrade --reuse-values fi ./helm-chart/public/oneuptime
fi
else
if [[ $DEPLOYED_VERSION_BUILD -eq 0 ]]
then
# set service of type nodeport for VM's.
sudo helm install fi fyipe/Fyipe \
sudo helm install fi oneuptime/OneUptime \
--set nginx-ingress-controller.service.type=NodePort \
--set nginx-ingress-controller.hostNetwork=true \
--set image.tag=$AVAILABLE_VERSION \

View File

@@ -0,0 +1 @@
.git

View File

@@ -0,0 +1,12 @@
dependencies:
- name: nginx-ingress-controller
repository: https://charts.bitnami.com/bitnami
version: 6.0.1
- name: mongodb
repository: https://charts.bitnami.com/bitnami
version: 10.23.10
- name: redis
repository: https://charts.bitnami.com/bitnami
version: 10.5.11
digest: sha256:a75a0ffb78f91da3da43a6808df7c9c1d8a5736f512f8e15f6ff67775d3b9ecb
generated: "2021-10-05T18:40:47.90754+01:00"

View File

@@ -0,0 +1,36 @@
apiVersion: v2
name: OneUptime
version: 3.0.0
description: One complete SRE and DevOps platform.
keywords:
- DevOps
- IT DevOps
- DevOps
- Monitoring
- Status Page
- On-Call
- On-Call Management
- Incident Management
- Performance Monitoring
- API Test
- Website Monitoring
- Website Test
- SRE
home: https://oneuptime.com
dependencies:
# https://github.com/kubernetes/ingress-nginx/tree/master/charts/ingress-nginx
- name: nginx-ingress-controller
repository: https://charts.bitnami.com/bitnami
version: "6.0.1"
- name: mongodb
version: "10.23.10"
repository: "https://charts.bitnami.com/bitnami"
- name: redis
version: "10.5.11"
repository: "https://charts.bitnami.com/bitnami"
maintainers:
- name: OneUptime Support
email: support@oneuptime.com
url: https://oneuptime.com/support
icon: https://oneuptime.com/img/Fyipe.svg
engine: gotpl
# Run `helm dependency update` to refresh the dependencies above.

View File

@@ -0,0 +1,42 @@
End-User License Agreement (EULA) of OneUptime
This End-User License Agreement ("EULA") is a legal agreement between you and HackerBay, Inc.
This EULA agreement governs your acquisition and use of our OneUptime software ("Software") directly from HackerBay, Inc. or indirectly through a HackerBay, Inc. authorized reseller or distributor (a "Reseller").
Please read this EULA agreement carefully before completing the installation process and using the OneUptime software. It provides a license to use the OneUptime software and contains warranty information and liability disclaimers.
If you register for a free trial of the OneUptime software, this EULA agreement will also govern that trial. By clicking "accept" or installing and/or using the OneUptime software, you are confirming your acceptance of the Software and agreeing to become bound by the terms of this EULA agreement.
If you are entering into this EULA agreement on behalf of a company or other legal entity, you represent that you have the authority to bind such entity and its affiliates to these terms and conditions. If you do not have such authority or if you do not agree with the terms and conditions of this EULA agreement, do not install or use the Software, and you must not accept this EULA agreement.
This EULA agreement shall apply only to the Software supplied by HackerBay, Inc. herewith regardless of whether other software is referred to or described herein. The terms also apply to any HackerBay, Inc. updates, supplements, Internet-based services, and support services for the Software, unless other terms accompany those items on delivery. If so, those terms apply. This EULA was created by EULA Template for OneUptime.
License Grant
HackerBay, Inc. hereby grants you a personal, non-transferable, non-exclusive licence to use the OneUptime software on your devices in accordance with the terms of this EULA agreement.
You are permitted to load the OneUptime software under your control. You are responsible for ensuring your device meets the minimum requirements of the OneUptime software.
You are not permitted to:
Edit, alter, modify, adapt, translate or otherwise change the whole or any part of the Software nor permit the whole or any part of the Software to be combined with or become incorporated in any other software, nor decompile, disassemble or reverse engineer the Software or attempt to do any such things
Reproduce, copy, distribute, resell or otherwise use the Software for any commercial purpose
Allow any third party to use the Software on behalf of or for the benefit of any third party
Use the Software in any way which breaches any applicable local, national or international law
use the Software for any purpose that HackerBay, Inc. considers is a breach of this EULA agreement
Intellectual Property and Ownership
HackerBay, Inc. shall at all times retain ownership of the Software as originally downloaded by you and all subsequent downloads of the Software by you. The Software (and the copyright, and other intellectual property rights of whatever nature in the Software, including any modifications made thereto) are and shall remain the property of HackerBay, Inc..
HackerBay, Inc. reserves the right to grant licences to use the Software to third parties.
Termination
This EULA agreement is effective from the date you first use the Software and shall continue until terminated. You may terminate it at any time upon written notice to HackerBay, Inc..
It will also terminate immediately if you fail to comply with any term of this EULA agreement. Upon such termination, the licenses granted by this EULA agreement will immediately terminate and you agree to stop all access and use of the Software. The provisions that by their nature continue and survive will survive any termination of this EULA agreement.
Governing Law
This EULA agreement, and any dispute arising out of or in connection with this EULA agreement, shall be governed by and construed in accordance with the laws of us.
Copyright (C) HackerBay, Inc - All Rights Reserved
Unauthorized copying of this project, via any medium is strictly prohibited
This project is proprietary and confidential

View File

@@ -0,0 +1,349 @@
# OneUptime
OneUptime is one complete SRE and DevOps platform.
OneUptime lets you do:
**Monitoring:** Monitors your website, web apps, APIs, servers and more and give you detailed metrics of things that might be wrong with your infrastructure.
**Status Page:** OneUptime gives you a beautiful and customizable status page for your online business which helps improve transparency with your customers and cuts support costs.
**Tests:** Write automated tests for your website, API's and more and know instantly when they start failing.
**On-Call and Incident Management:** On-Call Management lets you alert the right team at the right time saving you critical time during downtime.
**Performance Monitoring:** Monitor the performance of your apps, servers, APIs, and more and alert your team when any of your performance metrics degrades.
**Website:** https://oneuptime.com
## TL;DR;
```console
helm repo add oneuptime https://oneuptime.com/chart
helm repo update
helm install oneuptime oneuptime/OneUptime
```
Note: `oneuptime` is your release name.
## Introduction
This chart bootstraps a [OneUptime](https://oneuptime.com) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes 1.12+
- Helm 2.11+ or Helm 3.0-beta3+
- PV provisioner support in the underlying infrastructure
- ReadWriteMany volumes for deployment scaling
## Setup SMTP Server (Optional)
By default, oneuptime will ship with its own default credential configuration for the SMTP server.
### Setup Private Key
```
# generate private key
openssl genrsa -out private 2048
# Encode it to base64 and export it
export DKIM_PRIVATE_KEY=$(cat private | base64)
```
> If you already have a private key, you can point the export command to it, and export as base64 encoded
### Setup DKIM on DNS TXT Record
```
# This is the private key created in the above step.
chmod 0400 private
openssl rsa -in private -out public -pubout
# value of DKIM DNS record should be...
echo "v=DKIM1;p=$(grep -v '^-' public | tr -d '\n')"
```
> When setting up the DKIM dns txt record (recommended), the selector should be `oneuptime._domainkey` then the value should be the output of the echo command
### Setup DMARC and SPF DNS TXT Record (Optional)
To setup dmarc for the smtp server, you need to create a new dns record with the following values
| Type | Name | Content |
|----------------------|------------------------------------------|---------------------------------------------------------|
| TXT | _dmarc | v=DMARC1; p=reject; adkim=s; aspf=r; rua=mailto:youremail; ruf=mailto:youremail; pct=100 |
> For SPF dns record, you need to setup with appropriate values, if the ip of the smtp mail server is static, you can add that to the spf ip list
### Setup tls Keys
```
# Unix machine
# generate tls_cert.pem and tls_key.pem
# this command will open a prompt for you to fill in your details
openssl req -x509 -nodes -days 2190 -newkey rsa:2048 -keyout tls_key.pem -out tls_cert.pem
# Encode your tls to base64 and export it
export TLS_KEY=$(cat tls_key.pem | base64)
export TLS_CERT=$(cat tls_cert.pem | base64)
# DOMAIN should equal your domain
export DOMAIN=$DOMAIN
```
> If you already have tls_key and tls_cert for your domain, you can point the export command to the tls files to generate base64 encoded value
## Installing the Chart
To install the chart with the release name `oneuptime`:
```console
helm repo add oneuptime https://oneuptime.com/chart
helm repo update
helm install oneuptime --set encryptionKey=ThisEncryptionKeyLengthIs32Size. oneuptime/OneUptime
```
The command deploys OneUptime on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
Please Note: The Encryption Key should be 32 characters in length. Please change it to any random string you like. You need to keep the encryption key safe, ideally in an encrypted vault.
## Uninstalling the Chart
To uninstall/delete the `oneuptime` deployment:
```console
helm uninstall oneuptime
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Parameters
The following table lists the configurable parameters of the OneUptime chart and their default values per section/component:
### OneUptime parameters (optional)
You can add multiple hosts for the oneuptime service. It's in this format:
```
oneuptime:
hosts:
host1:
host:
tls:
enabled: false
crt:
key:
```
| Parameter | Description | Default |
|---------------------------|-------------------------------------------------|---------------------------------------------------------|
| `oneuptime.host` | Hostname where you want to run oneuptime on | `*` |
| `oneuptime.tls.enabled` | Enable HTTPS | `false` |
| `oneuptime.tls.crt`       | Certificate in Base64 format                    | `nil` |
| `oneuptime.tls.key` | Key in Base64 format | `nil` |
### Status Page parameters (optional)
You can add multiple hosts for the status page. It's in this format:
```
statusPage:
hosts:
host1:
host:
tls:
enabled: false
crt:
key:
```
**Important:** If you want to run status pages on your subdomain (like status.yourcompany.com). You need to provide OneUptime Host (Yes, `oneuptime.hosts.host1.host` param, NOT `statusPage.hosts.host1.host`). If you specify `statusPage.hosts.host1.host`. Status page will work for that particular host and not for multiple hosts.
| Parameter | Description | Default |
|---------------------------|-------------------------------------------------|---------------------------------------------------------|
| `statusPage.enabled` | Enable Status Page Ingress | `true`
| `statusPage.host` | Hostname where you want to run your Status Page on, for multiple hosts / status page you leave this blank | `*` |
| `statusPage.tls.enabled` | Enable HTTPS | `false` |
| `statusPage.tls.crt`      | Certificate in Base64 format                    | `nil` |
| `statusPage.tls.key` | Key in Base64 format | `nil` |
### Probe parameters [advanced] (optional)
**What are probes?**
Probes are agents / workers / cron-jobs that monitor your websites at every X interval (default: 1 min. You can change that in the dashboard when you create a new website to monitor). They not only monitor websites, but also other resources like IoT devices, APIs and more. Anything that needs to be monitored by an agent will be monitored by a probe.
You can create any number of probes here. By default, we create two probes, but you can increase or decrease the count.
| Parameter | Description | Default |
|---------------------------|-------------------------------------------------|---------------------------------------------------------|
| `probes.probe1.port` | Port for probe 1 (specify any unused port) | `*` |
| `probes.probe1.name` | Name of the Probe | `Probe 1` |
| `probes.probe1.key` | Any random key | `sample-key` |
| `probes.probe1.servicePort` | Port to make the probe ping from outside world | `80` |
You can add any number of probe by specifying `probes.probe<N>.<port | name | key | servicePort>` to your values.yaml.
### Rate Limitter parameters [advanced] (optional)
Enable this if you want IP-based rate limiting for the OneUptime API.
| Parameter | Description | Default |
|---------------------------|-------------------------------------------------|---------------------------------------------------------|
| `rateLimitter.enabled` | Enable API rate limitter | `false` |
| `rateLimitter.requestLimit` | Limit of requests in a particular time window (see below) | `5000` |
| `rateLimitter.requestLimitTimePeriodInMS` | Rate Limitter window in MS | `216000` |
## OneUptime Images [advanced] (optional)
OneUptime Images are loaded from DockerHub by default. Images are public and by default `latest` images are downloaded. We recommend following this tag. OneUptime will handle all the data migration and changes.
```
image:
registry: docker.io # Docker Registry where to pull images from.
repository: oneuptimeproject # OneUptime docker repository.
tag: latest # We recommend `latest` tag.
pullPolicy: Always # We recommend Always
restartPolicy: Always # We recommend Always
```
## Replicas
OneUptime by default will start all containers as `1` replica. To increase the replica count, set
`replicaCount: <number>`
## Redis Values
This is taken from Bitnami Helm Chart. Please refer to https://bitnami.com/stack/redis/helm
Here are default values:
```
redis:
redisPort: 6379
image:
registry: docker.io
repository: bitnami/redis
tag: latest
pullPolicy: Always
usePassword: false
persistence:
enabled: true
mountPath: /bitnami/redis
size: 20Gi
```
## MongoDB Values
This is taken from Bitnami Helm Chart. Please refer to https://bitnami.com/stack/mongodb/helm
Here are default values:
```
mongodb:
image:
registry: docker.io
repository: bitnami/mongodb
tag: latest
pullPolicy: Always
mongodbRootPassword: root
mongodbUsername: oneuptime
mongodbPassword: password
mongodbDatabase: oneuptimedb
replicaSet:
enabled: true
name: rs0
useHostnames: true
key: mongodboneuptime
secondary: 1
arbiter: 1
persistence:
enabled: true
mountPath: /bitnami/mongodb
size: 20Gi
useStatefulSet: true
```
If you want to expose MongoDB out to the internet, run `install` or `upgrade` with --set mongodb.ingress.enabled=true. You'll see an ingress service for mongodb created with which you can access mongodb data on your cluster.
## Microk8s
If you want to install OneUptime on a VM. Add these lines to `sudo crontab -e`
```
# Run this on Reboot.
@reboot sudo curl https://oneuptime.com/chart/install.sh | sudo bash
# Run this once every 24 hours.
0 0 * * * sudo curl https://oneuptime.com/chart/install.sh | sudo bash
```
Reboot the machine and wait for 15 mins.
## Modifying default params
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
helm install oneuptime \
--set global.imageRegistry=docker.io \
oneuptime
```
## Configuration and installation details
### [Rolling VS Immutable tags]
It is strongly recommended to use images tagged with `latest`. OneUptime automatically takes care of data migration if image with tag `latest` updates.
## Persistence
Bitnami MongoDB, Redis charts are used as dependencies which takes care of persistence across cloud platforms.
Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, Azure, and minikube.
## Update
To update cluster with new version of OneUptime. Please run:
```
VERSION=$(curl https://oneuptime.com/api/version | jq '.server' | tr -d '"')
kubectl delete job oneuptime-init-script || echo "init-script already deleted"
helm upgrade --reuse-values oneuptime oneuptime/OneUptime \
--set image.tag=$VERSION
```
OneUptime automatically takes care of data migration.
## Things to note
- If you do not specify TLS config, we will self-sign a certificate for you.
You can also use Cloudflare Universal SSL and run OneUptime service on Port 80 (when you're evaluating. NOT recommended for production)
## Support
If you need any help with deployments, please reach out to our engineering support team at support@oneuptime.com and we'll get back to you in less than 1 business day.
## License
The project will be deployed on Evaluation License by default. For a commercial license, please reach out to sales@oneuptime.com.
If you already have a commercial license. Please enter the license key on your dashboard (after you log in to OneUptime)

Binary file not shown.

View File

@@ -0,0 +1,95 @@
apiVersion: v1
entries:
mongodb:
- annotations:
category: Database
apiVersion: v2
appVersion: 4.4.8
created: "2021-09-10T14:09:36.396546+01:00"
dependencies:
- name: common
repository: https://charts.bitnami.com/bitnami
tags:
- bitnami-common
version: 1.x.x
description: NoSQL document-oriented database that stores JSON-like documents with dynamic schemas, simplifying the integration of data in content-driven applications.
digest: 6c88c641c7aeec1ae15bb03841d704b1e3ff709ca7c792ac9a56c89e8296a64e
home: https://github.com/bitnami/charts/tree/master/bitnami/mongodb
icon: https://bitnami.com/assets/stacks/mongodb/img/mongodb-stack-220x234.png
keywords:
- mongodb
- database
- nosql
- cluster
- replicaset
- replication
maintainers:
- email: containers@bitnami.com
name: Bitnami
name: mongodb
sources:
- https://github.com/bitnami/bitnami-docker-mongodb
- https://mongodb.org
urls:
- charts/mongodb-10.23.10.tgz
version: 10.23.10
nginx-ingress-controller:
- annotations:
category: Infrastructure
apiVersion: v2
appVersion: 0.41.2
created: "2021-09-10T14:09:36.417987+01:00"
description: Chart for the nginx Ingress controller
digest: a40b7483b6b11c8d9c36db0c583d29658b2d484f04175cb31c6202f4d8be37be
home: https://github.com/bitnami/charts/tree/master/bitnami/nginx-ingress-controller
icon: https://bitnami.com/assets/stacks/nginx-ingress-controller/img/nginx-ingress-controller-stack-220x234.png
keywords:
- ingress
- nginx
- http
- web
- www
- reverse proxy
maintainers:
- email: containers@bitnami.com
name: Bitnami
name: nginx-ingress-controller
sources:
- https://github.com/bitnami/bitnami-docker-nginx-ingress-controller
- https://github.com/kubernetes/ingress-nginx
urls:
- charts/nginx-ingress-controller-6.0.1.tgz
version: 6.0.1
redis:
- annotations:
category: Database
apiVersion: v2
appVersion: 6.2.5
created: "2021-09-10T14:09:36.428361+01:00"
dependencies:
- name: common
repository: https://charts.bitnami.com/bitnami
tags:
- bitnami-common
version: 1.x.x
description: Open source, advanced key-value store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets and sorted sets.
digest: bc737556010607f37747fda35fe4739ed0447bd75f785652d1713a75b643028f
home: https://github.com/bitnami/charts/tree/master/bitnami/redis
icon: https://bitnami.com/assets/stacks/redis/img/redis-stack-220x234.png
keywords:
- redis
- keyvalue
- database
maintainers:
- email: containers@bitnami.com
name: Bitnami
- email: cedric@desaintmartin.fr
name: desaintmartin
name: redis
sources:
- https://github.com/bitnami/bitnami-docker-redis
- http://redis.io/
urls:
- charts/redis-14.8.8.tgz
version: 10.5.11
generated: "2021-09-10T14:09:36.3769+01:00"

View File

@@ -0,0 +1,85 @@
============================================
IMPORTANT: After Installation Steps
============================================
** Thank you for installing OneUptime **
** Please be patient while the chart is being deployed **
** This usually takes few minutes or more **
To access your OneUptime app from steps below:
{{- if eq (index .Values "nginx-ingress-controller" "service" "type") "LoadBalancer" }}
============================================
Make sure external IP's are assigned.
============================================
Please run these commands to get OneUptime URL
$ kubectl get svc {{ .Release.Name }}-nginx-ingress-controller --namespace={{ .Release.Namespace }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
If the load balancer did not assign an external IP yet (if the IP is still pending).
Retry this command after few mins.
{{- end}}
============================================
Sign Up
============================================
{{- if eq (index .Values "nginx-ingress-controller" "service" "type") "LoadBalancer" }}
Go to the External IP (generated from step 1) from your browser and sign up a new admin account.
{{- else}}
Go to the External IP of your server from your browser and sign up a new admin account.
{{- end}}
This is your master admin account (and not a user account).
To create a user account. Please follow steps below.
============================================
Setup Email
============================================
When you're done signing up the admin account. Head over to "Settings" -> "Email"
Add your SMTP server details here to enable email alerts.
============================================
Setup Twilio
============================================
When you're done signing up the admin account. Head over to "Settings" -> "Call and SMS"
Add your Twilio Settings here to enable call and SMS alert.
============================================
Create User
============================================
On the Admin Dashboard, go to the "Users" menu and add a new user.
Log out of the admin account, and log in with a user account to access User's OneUptime Dashboard.
{{- if not $.Values.isThirdPartyBilling }}
============================================
STEP 5: Buy License
============================================
OneUptime which you just installed runs on an evaluation license.
Please contact us at sales@oneuptime.com to buy a commercial license.
We support companies of all sizes.
Once you buy the commercial license,
you can enter that license key on your admin dashboard.
{{- end }}
============================================
Support and Demo
============================================
Demo:
If you're looking for a personalized OneUptime demo.
Please email us at demo@oneuptime.com to schedule one.
Support and Help:
If you're looking for help with anything,
Please email us at support@oneuptime.com and we'll get back to you in less than 1 business day.
Thank you for installing OneUptime!

View File

@@ -0,0 +1,40 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "oneuptime.mongodbConnectionString" -}}
{{ printf "mongodb://%s:%s@%s-%s.%s-%s.%s.%s:%s,%s-%s.%s-%s.%s.%s:%s/%s?replicaSet=%s" $.Values.mongodb.auth.username $.Values.mongodb.auth.password $.Release.Name "mongodb-0" $.Release.Name "mongodb-headless" $.Release.Namespace "svc.cluster.local" "27017" $.Release.Name "mongodb-1" $.Release.Name "mongodb-headless" $.Release.Namespace "svc.cluster.local" "27017" $.Values.mongodb.auth.database $.Values.mongodb.replicaSetName }}
{{- end -}}
{{- define "oneuptime.internalSmtpServer" -}}
{{ printf "%s-haraka.%s.%s" $.Release.Name $.Release.Namespace "svc.cluster.local" }}
{{- end -}}
{{- define "oneuptime.redisHost" -}}
{{ printf "%s-redis-master.%s.%s" $.Release.Name $.Release.Namespace "svc.cluster.local" }}
{{- end -}}
{{- define "oneuptime.backendHost" -}}
{{ printf "%s-backend.%s.%s" $.Release.Name $.Release.Namespace "svc.cluster.local" }}
{{- end -}}
{{- define "oneuptime.oneuptimeHost" -}}
{{ printf "%s-backend.%s" $.Values.oneuptime.host }}
{{- end -}}
{{- define "oneuptime.serverUrl" -}}
{{ printf "http://%s-backend.%s.%s" $.Release.Name $.Release.Namespace "svc.cluster.local" }}
{{- end -}}
{{- define "oneuptime.scriptRunnerUrl" -}}
{{ printf "http://%s-script.%s.%s" $.Release.Name $.Release.Namespace "svc.cluster.local" }}
{{- end -}}
{{- define "oneuptime.dataIngestorUrl" -}}
{{ printf "http://%s-ingestor.%s.%s" $.Release.Name $.Release.Namespace "svc.cluster.local" }}
{{- end -}}
{{- define "oneuptime.realtimeUrl" -}}
{{ printf "http://%s-realtime.%s.%s" $.Release.Name $.Release.Namespace "svc.cluster.local" }}
{{- end -}}

View File

@@ -0,0 +1,87 @@
############-----ACCOUNTS----#############################
# OneUptime Accounts Deployment: serves the accounts (login/signup) web app.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ printf "%s-%s" .Release.Name "accounts" }}
  namespace: {{ .Release.Namespace }}
  labels:
    app: {{ printf "%s-%s" .Release.Name "accounts" }}
    app.kubernetes.io/part-of: oneuptime
    app.kubernetes.io/managed-by: Helm
spec:
  selector:
    matchLabels:
      app: {{ printf "%s-%s" .Release.Name "accounts" }}
  replicas: {{ .Values.replicaCount }}
  template:
    metadata:
      labels:
        app: {{ printf "%s-%s" .Release.Name "accounts" }}
    spec:
      containers:
        - image: {{ printf "%s/%s/%s:%s" .Values.image.registry .Values.image.repository "accounts" .Values.image.tag }}
          name: {{ printf "%s-%s" .Release.Name "accounts" }}
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          resources:
            requests:
              cpu: 250m
            limits:
              cpu: 500m
          env:
            # SaaS-only settings: billing and analytics keys.
            {{- if .Values.saas.isSaasService }}
            - name: STRIPE_PUBLIC_KEY
              value: {{ .Values.saas.stripe.publicKey }}
            - name: IS_SAAS_SERVICE
              value: 'true'
            - name: AMPLITUDE_PUBLIC_KEY
              value: {{ .Values.saas.amplitude.key }}
            {{- end }}
            - name: NODE_ENV
              value: {{ .Values.nodeEnv }}
            - name: SENTRY_DSN
              value: {{ .Values.sentry.accounts.dsn }}
            - name: DISABLE_SIGNUP
              value: {{ .Values.disableSignup | quote }}
          ports:
            - containerPort: {{ .Values.host.accountsPort }}
              # NOTE(review): hostPort pins each replica to a distinct node's
              # port — confirm this is intended rather than relying on the
              # ClusterIP Service below.
              hostPort: {{ .Values.host.accountsPort }}
              name: {{ printf "%s-%s" .Release.Name "accounts" }}
      restartPolicy: {{ .Values.image.restartPolicy }}
---
# OneUptime Accounts Service
apiVersion: v1
kind: Service
metadata:
  labels:
    app: {{ printf "%s-%s" .Release.Name "accounts" }}
    app.kubernetes.io/part-of: oneuptime
    app.kubernetes.io/managed-by: Helm
  name: {{ printf "%s-%s" .Release.Name "accounts" }}
  namespace: {{ .Release.Namespace }}
spec:
  ports:
    - port: {{ .Values.host.accountsServicePort }}
      targetPort: {{ .Values.host.accountsPort }}
  selector:
    app: {{ printf "%s-%s" .Release.Name "accounts" }}
  type: ClusterIP
---
###########################################
# Optional CPU-based autoscaler for the accounts deployment.
{{- if .Values.autoScaler.enabled }}
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: {{ printf "%s-%s" .Release.Name "accounts" }}
spec:
  maxReplicas: {{ .Values.autoScaler.maxReplicas }}
  minReplicas: {{ .Values.autoScaler.minReplicas }}
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: {{ printf "%s-%s" .Release.Name "accounts" }}
  targetCPUUtilizationPercentage: {{ .Values.autoScaler.averageCpuUtilization }}
---
{{- end }}

View File

@@ -0,0 +1,85 @@
############-ADMIN-DASHBOARD-#########
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ printf "%s-%s" .Release.Name "admin" }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ printf "%s-%s" .Release.Name "admin" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app: {{ printf "%s-%s" .Release.Name "admin" }}
replicas: {{ .Values.replicaCount }}
template:
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "admin" }}
spec:
containers:
- image: {{ printf "%s/%s/%s:%s" .Values.image.registry .Values.image.repository "admin-dashboard" .Values.image.tag }}
name: {{ printf "%s-%s" .Release.Name "admin" }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
resources:
requests:
cpu: 250m
limits:
cpu: 500m
env:
{{- if .Values.saas.isSaasService }}
- name: IS_SAAS_SERVICE
value: 'true'
{{- else }}
- name: LICENSE_URL
value: {{ .Values.oneuptime.licensingUrl }}
{{- end }}
- name: NODE_ENV
value: {{ .Values.nodeEnv }}
- name: SENTRY_DSN
value: {{ .Values.sentry.adminDashboard.dsn }}
- name: IS_THIRD_PARTY_BILLING
value: {{ .Values.isThirdPartyBilling | quote }}
- name: INTERNAL_SMTP_SERVER
value: {{ template "oneuptime.internalSmtpServer" . }}
ports:
- containerPort: {{ .Values.host.adminDashboardPort }}
hostPort: {{ .Values.host.adminDashboardPort }}
name: {{ printf "%s-%s" .Release.Name "admin" }}
restartPolicy: {{ .Values.image.restartPolicy }}
---
apiVersion: v1
kind: Service
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "admin" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
name: {{ printf "%s-%s" .Release.Name "admin" }}
namespace: {{ .Release.Namespace }}
spec:
ports:
- port: {{ .Values.host.adminDashboardServicePort }}
targetPort: {{ .Values.host.adminDashboardPort }}
selector:
app: {{ printf "%s-%s" .Release.Name "admin" }}
type: ClusterIP
---
##################################
{{- if .Values.autoScaler.enabled }}
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ printf "%s-%s" .Release.Name "admin" }}
spec:
maxReplicas: {{ .Values.autoScaler.maxReplicas }}
minReplicas: {{ .Values.autoScaler.minReplicas }}
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ printf "%s-%s" .Release.Name "admin" }}
targetCPUUtilizationPercentage: {{ .Values.autoScaler.averageCpuUtilization }}
---
{{- end }}

View File

@@ -0,0 +1,88 @@
####################################
# IMPORTANT: Kube resources below are ONLY for the SaaS service
####################################
{{- if .Values.saas.isSaasService }}
###########-----API-DOCS------##############
# OneUptime API docs Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ printf "%s-%s" .Release.Name "api-docs" }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ printf "%s-%s" .Release.Name "api-docs" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app: {{ printf "%s-%s" .Release.Name "api-docs" }}
replicas: {{ .Values.replicaCount }}
template:
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "api-docs" }}
spec:
containers:
- image: {{ printf "%s/%s/%s:%s" .Values.image.registry .Values.image.repository "api-docs" .Values.image.tag }}
name: {{ printf "%s-%s" .Release.Name "api-docs" }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
resources:
requests:
cpu: 250m
limits:
cpu: 500m
env:
- name: IS_SAAS_SERVICE
value: 'true'
- name: NODE_ENV
value: {{ .Values.nodeEnv }}
- name: SENTRY_DSN
value: {{ .Values.sentry.apiDocs.dsn }}
ports:
- containerPort: {{ .Values.host.apiDocsPort }}
hostPort: {{ .Values.host.apiDocsPort }}
name: {{ printf "%s-%s" .Release.Name "api-docs" }}
restartPolicy: {{ .Values.image.restartPolicy }}
---
# OneUptime API docs Service
apiVersion: v1
kind: Service
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "api-docs" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
name: {{ printf "%s-%s" .Release.Name "api-docs" }}
namespace: {{ .Release.Namespace }}
spec:
ports:
- port: {{ .Values.host.apiDocsServicePort }}
protocol: TCP
targetPort: {{ .Values.host.apiDocsPort }}
selector:
app: {{ printf "%s-%s" .Release.Name "api-docs" }}
type: ClusterIP
---
###########################################
# The HPA must live INSIDE the saas.isSaasService guard: the api-docs
# Deployment it targets only exists for the SaaS service, so creating the
# HPA unconditionally would leave a dangling autoscaler on self-hosted
# installs.
{{- if .Values.autoScaler.enabled }}
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: {{ printf "%s-%s" .Release.Name "api-docs" }}
spec:
  maxReplicas: {{ .Values.autoScaler.maxReplicas }}
  minReplicas: {{ .Values.autoScaler.minReplicas }}
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: {{ printf "%s-%s" .Release.Name "api-docs" }}
  targetCPUUtilizationPercentage: {{ .Values.autoScaler.averageCpuUtilization }}
---
{{- end }}
{{- end }}

View File

@@ -0,0 +1,88 @@
#######-------- APPLICATION SCANNER --------#########
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ printf "%s-%s" .Release.Name "app-scan" }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ printf "%s-%s" .Release.Name "app-scan" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app: {{ printf "%s-%s" .Release.Name "app-scan" }}
replicas: 1
template:
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "app-scan" }}
spec:
containers:
- image: {{ printf "%s/%s/%s:%s" .Values.image.registry .Values.image.repository "application-scanner" .Values.image.tag }}
name: {{ printf "%s-%s" .Release.Name "app-scan" }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
resources:
requests:
cpu: 250m
limits:
cpu: 500m
env:
- name: NODE_ENV
value: {{ .Values.nodeEnv }}
- name: SENTRY_DSN
value: {{ .Values.sentry.applicationScanner.dsn }}
- name: PORT
value: {{ .Values.host.applicationScannerPort | quote }}
- name: SERVER_URL
value: {{ template "oneuptime.serverUrl" $ }}
- name: IS_SAAS_SERVICE
value: 'true'
- name: CLUSTER_KEY
valueFrom:
configMapKeyRef:
name: {{ printf "%s-%s" $.Release.Name "configmap" }}
key: clusterkey
ports:
- containerPort: {{ .Values.host.applicationScannerPort }}
hostPort: {{ .Values.host.applicationScannerPort }}
name: {{ printf "%s-%s" .Release.Name "app-scan" }}
restartPolicy: {{ .Values.image.restartPolicy }}
---
# OneUptime Application Scanner Service
apiVersion: v1
kind: Service
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "app-scan" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
name: {{ printf "%s-%s" .Release.Name "app-scan" }}
namespace: {{ .Release.Namespace }}
spec:
ports:
- port: {{ .Values.host.applicationScannerServicePort }}
targetPort: {{ .Values.host.applicationScannerPort }}
selector:
app: {{ printf "%s-%s" .Release.Name "app-scan" }}
type: ClusterIP
---
###################################
{{- if .Values.autoScaler.enabled }}
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ printf "%s-%s" .Release.Name "app-scan" }}
spec:
maxReplicas: {{ .Values.autoScaler.maxReplicas }}
minReplicas: {{ .Values.autoScaler.minReplicas }}
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ printf "%s-%s" .Release.Name "app-scan" }}
targetCPUUtilizationPercentage: {{ .Values.autoScaler.averageCpuUtilization }}
---
{{- end }}

View File

@@ -0,0 +1,213 @@
###########-----BACKEND------##############
# OneUptime Backend Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ printf "%s-%s" .Release.Name "backend" }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ printf "%s-%s" .Release.Name "backend" }}
{{- if $.Values.saas.isRunningOnGCPMarketplace }}
app.kubernetes.io/name: "$name"
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: {{ printf "%s-%s" .Release.Name "backend" }}
{{- end }}
spec:
selector:
matchLabels:
app: {{ printf "%s-%s" .Release.Name "backend" }}
replicas: {{ .Values.replicaCount }}
template:
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "backend" }}
spec:
containers:
- image: {{ printf "%s/%s/%s:%s" .Values.image.registry .Values.image.repository "backend" .Values.image.tag }}
name: {{ printf "%s-%s" .Release.Name "backend" }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
resources:
requests:
cpu: 450m
limits:
cpu: 700m
env:
- name: JWT_SECRET
valueFrom:
configMapKeyRef:
name: {{ printf "%s-%s" .Release.Name "configmap" }}
key: encryptionkey
- name: ENCRYPTION_KEY
valueFrom:
configMapKeyRef:
name: {{ printf "%s-%s" .Release.Name "configmap" }}
key: encryptionkey
- name: MONGO_URL
value: {{ template "oneuptime.mongodbConnectionString" . }}
{{- if eq .Values.mongodb.architecture "replicaset" }}
- name: IS_MONGO_REPLICA_SET
value: {{ true | quote }}
- name: MONGO_REPLICA_SET_NAME
value: {{ .Values.mongodb.replicaSetName }}
{{- end }}
- name: REDIS_HOST
value: {{ template "oneuptime.redisHost" . }}
- name: ONEUPTIME_HOST
value: {{ .Values.oneuptime.hosts.host1.host }}
- name: BACKEND_PROTOCOL
value: {{ .Values.backendProtocol }}
- name: REDIS_PORT
value: {{ .Values.redis.redisPort | quote }}
- name: PORT
value: {{ .Values.host.backendPort | quote }}
- name: EMAIL_VERIFY_TIME
value: '3600'
- name: BACKEND_HOST
value: {{ .Values.host.backend }}
- name: PUSHNOTIFICATION_PRIVATE_KEY
value: {{ .Values.pushNotification.privateKey }}
- name: PUSHNOTIFICATION_PUBLIC_KEY
value: {{ .Values.pushNotification.publicKey }}
- name: PUSHNOTIFICATION_URL
value: {{ .Values.pushNotification.url }}
- name: SCRIPT_RUNNER_URL
value: {{ template "oneuptime.scriptRunnerUrl" $ }}
- name: REALTIME_URL
value: {{ template "oneuptime.realtimeUrl" $ }}
- name: NODE_ENV
value: {{ .Values.nodeEnv }}
- name: SENTRY_DSN
value: {{ .Values.sentry.backend.dsn }}
- name: IS_TESTING
value: {{ .Values.isTesting | quote }}
- name: RATE_LIMITTER_TIME_PERIOD_IN_MS
value: {{ .Values.rateLimitter.requestLimitTimePeriodInMS | quote }}
- name: RATE_LIMITTER_REQUEST_LIMIT
value: {{ .Values.rateLimitter.requestLimit | quote }}
- name: RATE_LIMITTER_ENABLED
value: {{ .Values.rateLimitter.enabled | quote }}
- name: ADMIN_EMAIL
value: {{ .Values.oneuptime.admin.email }}
- name: ADMIN_PASSWORD
value: {{ .Values.oneuptime.admin.password }}
- name: DISABLE_SIGNUP
value: {{ .Values.disableSignup | quote }}
- name: INTERNAL_SMTP_SERVER
value: {{ template "oneuptime.internalSmtpServer" . }}
- name: INTERNAL_SMTP_PORT
value: {{ quote .Values.host.harakaServicePort }}
- name: INTERNAL_SMTP_USER
value: {{ .Values.haraka.user }}
- name: INTERNAL_SMTP_PASSWORD
value: {{ .Values.haraka.password }}
- name: INTERNAL_SMTP_FROM
value: {{ .Values.haraka.fromEmail }}
- name: INTERNAL_SMTP_NAME
value: {{ .Values.haraka.fromName }}
{{- if .Values.newRelic.backend.licenseKey }}
- name: NEW_RELIC_LICENSE_KEY
value: {{ .Values.newRelic.backend.licenseKey }}
{{- end }}
{{- if .Values.newRelic.backend.appName }}
- name: NEW_RELIC_APPLICATION_NAME
value: {{ .Values.newRelic.backend.appName}}
{{- end }}
- name: CLUSTER_KEY
valueFrom:
configMapKeyRef:
name: {{ printf "%s-%s" .Release.Name "configmap" }}
key: clusterkey
{{- if .Values.saas.isSaasService }}
- name: STRIPE_PUBLIC_KEY
value: {{ .Values.saas.stripe.publicKey }}
- name: STRIPE_PRIVATE_KEY
value: {{ .Values.saas.stripe.privateKey }}
- name: IS_SAAS_SERVICE
value: 'true'
- name: TWITTER_BEARER_TOKEN
value: {{ .Values.saas.twitter.bearertoken }}
- name: AIRTABLE_API_KEY
value: {{ .Values.saas.airtable.key }}
- name: AIRTABLE_BASE_ID
value: {{ .Values.saas.airtable.baseId }}
- name: IS_THIRD_PARTY_BILLING
value: {{ .Values.isThirdPartyBilling | quote }}
- name: SLACK_ERROR_LOG_WEBHOOK
value: {{ .Values.saas.slackErrorLog.webhook }}
- name: SLACK_ERROR_LOG_CHANNEL
value: {{ .Values.saas.slackErrorLog.channel }}
{{- end }}
ports:
- containerPort: {{ .Values.host.backendPort }}
hostPort: {{ .Values.host.backendPort }}
name: {{ printf "%s-%s" .Release.Name "backend" }}
{{- if $.Values.saas.isRunningOnGCPMarketplace }}
- name: ubbagent
image: $imageUbbagent
resources:
requests:
cpu: 250m
limits:
cpu: 500m
env:
- name: AGENT_CONFIG_FILE
value: "/etc/ubbagent/config.yaml"
- name: AGENT_LOCAL_PORT
value: "4567"
- name: AGENT_ENCODED_KEY
valueFrom:
secretKeyRef:
name: $reportingSecret
key: reporting-key
- name: AGENT_CONSUMER_ID
valueFrom:
secretKeyRef:
name: $reportingSecret
key: consumer-id
volumeMounts:
- name: ubbagent-config
mountPath: /etc/ubbagent
volumes:
- name: ubbagent-config
configMap:
name: ubbagent-config
{{- end }}
restartPolicy: {{ .Values.image.restartPolicy }}
---
# OneUptime Backend Service
apiVersion: v1
kind: Service
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "backend" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
name: {{ printf "%s-%s" .Release.Name "backend" }}
namespace: {{ .Release.Namespace }}
spec:
ports:
- port: {{ .Values.host.backendServicePort }}
targetPort: {{ .Values.host.backendPort }}
selector:
app: {{ printf "%s-%s" .Release.Name "backend" }}
type: ClusterIP
---
##################################################
{{- if .Values.autoScaler.enabled }}
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ printf "%s-%s" .Release.Name "backend" }}
spec:
maxReplicas: {{ .Values.autoScaler.maxReplicas }}
minReplicas: {{ .Values.autoScaler.minReplicas }}
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ printf "%s-%s" .Release.Name "backend" }}
targetCPUUtilizationPercentage: {{ .Values.autoScaler.averageCpuUtilization }}
---
{{- end }}

View File

@@ -0,0 +1,11 @@
# Shared configuration for all OneUptime services: the inter-service cluster
# key and the data-encryption key.
kind: ConfigMap
apiVersion: v1
metadata:
  name: {{ printf "%s-%s" .Release.Name "configmap" }}
  namespace: {{ .Release.Namespace }}
  labels:
    app.kubernetes.io/part-of: oneuptime
    app.kubernetes.io/managed-by: Helm
data:
  # NOTE(review): randAlphaNum is re-evaluated on every `helm upgrade`, so the
  # cluster key is regenerated (and every service that read the old key falls
  # out of sync until restarted) on each upgrade — confirm this is intended.
  clusterkey: {{ randAlphaNum 15 | quote }}
  encryptionkey: {{ .Values.encryptionKey | quote }}

View File

@@ -0,0 +1,88 @@
#######-------- CONTAINER SCANNER --------#########
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ printf "%s-%s" .Release.Name "cont-scan" }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ printf "%s-%s" .Release.Name "cont-scan" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app: {{ printf "%s-%s" .Release.Name "cont-scan" }}
replicas: 1
template:
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "cont-scan" }}
spec:
containers:
- image: {{ printf "%s/%s/%s:%s" .Values.image.registry .Values.image.repository "container-scanner" .Values.image.tag }}
name: {{ printf "%s-%s" .Release.Name "cont-scan" }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
resources:
requests:
cpu: 250m
limits:
cpu: 500m
env:
- name: NODE_ENV
value: {{ .Values.nodeEnv }}
- name: SENTRY_DSN
value: {{ .Values.sentry.containerScanner.dsn }}
- name: PORT
value: {{ .Values.host.containerScannerPort | quote }}
- name: SERVER_URL
value: {{ template "oneuptime.serverUrl" $ }}
- name: IS_SAAS_SERVICE
value: 'true'
- name: CLUSTER_KEY
valueFrom:
configMapKeyRef:
name: {{ printf "%s-%s" $.Release.Name "configmap" }}
key: clusterkey
ports:
- containerPort: {{ .Values.host.containerScannerPort }}
hostPort: {{ .Values.host.containerScannerPort }}
name: {{ printf "%s-%s" .Release.Name "cont-scan" }}
restartPolicy: {{ .Values.image.restartPolicy }}
---
# OneUptime Container Scanner Service
apiVersion: v1
kind: Service
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "cont-scan" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
name: {{ printf "%s-%s" .Release.Name "cont-scan" }}
namespace: {{ .Release.Namespace }}
spec:
ports:
- port: {{ .Values.host.containerScannerServicePort }}
targetPort: {{ .Values.host.containerScannerPort }}
selector:
app: {{ printf "%s-%s" .Release.Name "cont-scan" }}
type: ClusterIP
---
###################################
{{- if .Values.autoScaler.enabled }}
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ printf "%s-%s" .Release.Name "cont-scan" }}
spec:
maxReplicas: {{ .Values.autoScaler.maxReplicas }}
minReplicas: {{ .Values.autoScaler.minReplicas }}
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ printf "%s-%s" .Release.Name "cont-scan" }}
targetCPUUtilizationPercentage: {{ .Values.autoScaler.averageCpuUtilization }}
---
{{- end }}

View File

@@ -0,0 +1,93 @@
############-----DASHBOARD----#############################
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ printf "%s-%s" .Release.Name "dashboard" }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ printf "%s-%s" .Release.Name "dashboard" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app: {{ printf "%s-%s" .Release.Name "dashboard" }}
replicas: {{ .Values.replicaCount }}
template:
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "dashboard" }}
spec:
containers:
- image: {{ printf "%s/%s/%s:%s" .Values.image.registry .Values.image.repository "dashboard" .Values.image.tag }}
name: {{ printf "%s-%s" .Release.Name "dashboard" }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
resources:
requests:
cpu: 250m
limits:
cpu: 500m
env:
- name: DOMAIN
value: {{ .Values.domain }}
- name: NODE_ENV
value: {{ .Values.nodeEnv }}
- name: SENTRY_DSN
value: {{ .Values.sentry.dashboard.dsn }}
- name: PUSHNOTIFICATION_PUBLIC_KEY
value: {{ .Values.pushNotification.publicKey }}
{{- if .Values.statusPageDomain }}
- name: STATUSPAGE_DOMAIN
value: {{ .Values.statusPageDomain }}
{{- end }}
{{- if .Values.saas.isSaasService }}
- name: STRIPE_PUBLIC_KEY
value: {{ .Values.saas.stripe.publicKey }}
- name: IS_SAAS_SERVICE
value: 'true'
- name: AMPLITUDE_PUBLIC_KEY
value: {{ .Values.saas.amplitude.key }}
{{- end }}
ports:
- containerPort: {{ .Values.host.dashboardPort }}
hostPort: {{ .Values.host.dashboardPort }}
name: {{ printf "%s-%s" .Release.Name "dashboard" }}
restartPolicy: {{ .Values.image.restartPolicy }}
---
# OneUptime Dashboard Service
apiVersion: v1
kind: Service
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "dashboard" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
name: {{ printf "%s-%s" .Release.Name "dashboard" }}
namespace: {{ .Release.Namespace }}
spec:
ports:
- port: {{ .Values.host.dashboardServicePort }}
targetPort: {{ .Values.host.dashboardPort }}
selector:
app: {{ printf "%s-%s" .Release.Name "dashboard" }}
type: ClusterIP
---
##########################################################
{{- if .Values.autoScaler.enabled }}
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ printf "%s-%s" .Release.Name "dashboard" }}
spec:
maxReplicas: {{ .Values.autoScaler.maxReplicas }}
minReplicas: {{ .Values.autoScaler.minReplicas }}
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ printf "%s-%s" .Release.Name "dashboard" }}
targetCPUUtilizationPercentage: {{ .Values.autoScaler.averageCpuUtilization }}
---
{{- end }}

View File

@@ -0,0 +1,91 @@
########-ingestor-#################
# Ingestor Service and Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ printf "%s-%s" .Release.Name "ingestor" }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ printf "%s-%s" .Release.Name "ingestor" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app: {{ printf "%s-%s" .Release.Name "ingestor" }}
replicas: {{ .Values.replicaCount }}
template:
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "ingestor" }}
spec:
containers:
- image: {{ printf "%s/%s/%s:%s" .Values.image.registry .Values.image.repository "data-ingestor" .Values.image.tag }}
name: {{ printf "%s-%s" .Release.Name "ingestor" }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
resources:
requests:
cpu: 450m
limits:
cpu: 700m
env:
- name: SERVER_URL
value: {{ template "oneuptime.serverUrl" $ }}
- name: PORT
value: {{ .Values.host.dataIngestorPort | quote }}
- name: MONGO_URL
value: {{ template "oneuptime.mongodbConnectionString" . }}
- name: SCRIPT_RUNNER_URL
value: {{ template "oneuptime.scriptRunnerUrl" $ }}
- name: REALTIME_URL
value: {{ template "oneuptime.realtimeUrl" $ }}
- name: CLUSTER_KEY
valueFrom:
configMapKeyRef:
name: {{ printf "%s-%s" .Release.Name "configmap" }}
key: clusterkey
- name: NODE_ENV
value: {{ .Values.nodeEnv }}
- name: SENTRY_DSN
value: {{ .Values.sentry.dataIngestor.dsn }}
ports:
- containerPort: {{ .Values.host.dataIngestorPort }}
hostPort: {{ .Values.host.dataIngestorPort }}
name: {{ printf "%s-%s" .Release.Name "ingestor" }}
restartPolicy: {{ .Values.image.restartPolicy }}
---
apiVersion: v1
kind: Service
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "ingestor" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
name: {{ printf "%s-%s" .Release.Name "ingestor" }}
namespace: {{ .Release.Namespace }}
spec:
ports:
- port: {{ .Values.host.dataIngestorServicePort }}
targetPort: {{ .Values.host.dataIngestorPort }}
selector:
app: {{ printf "%s-%s" .Release.Name "ingestor" }}
type: ClusterIP
---
###########################
{{- if .Values.autoScaler.enabled }}
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ printf "%s-%s" .Release.Name "ingestor" }}
spec:
maxReplicas: {{ .Values.autoScaler.maxReplicas }}
minReplicas: {{ .Values.autoScaler.minReplicas }}
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ printf "%s-%s" .Release.Name "ingestor" }}
targetCPUUtilizationPercentage: {{ .Values.autoScaler.averageCpuUtilization }}
---
{{- end }}

View File

@@ -0,0 +1,65 @@
####################################
# IMPORTANT: Kube resources below are ONLY for the SaaS service
####################################
{{- if .Values.saas.exposeInitScriptContainer }}
###########-----init-script------#################
# OneUptime init-script
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ printf "%s-%s" .Release.Name "init-script" }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ printf "%s-%s" .Release.Name "init-script" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app: {{ printf "%s-%s" .Release.Name "init-script" }}
replicas: 1
template:
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "init-script" }}
spec:
containers:
- image: {{ printf "%s/%s/%s:%s" .Values.image.registry .Values.image.repository "init-script" .Values.image.tag }}
name: {{ printf "%s-%s" .Release.Name "init-script" }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: IS_SAAS_SERVICE
value: 'true'
- name: NODE_ENV
value: 'development'
- name: ONEUPTIME_HOST
value: {{ .Values.oneuptime.hosts.host1.host }}
ports:
- containerPort: {{ .Values.host.initScriptPort }}
hostPort: {{ .Values.host.initScriptPort }}
name: {{ printf "%s-%s" .Release.Name "init-script" }}
restartPolicy: {{ .Values.image.restartPolicy }}
---
apiVersion: v1
kind: Service
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "init-script" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
name: {{ printf "%s-%s" .Release.Name "init-script" }}
namespace: {{ .Release.Namespace }}
spec:
ports:
- port: {{ .Values.host.initScriptServicePort }}
protocol: TCP
targetPort: {{ .Values.host.initScriptPort }}
selector:
app: {{ printf "%s-%s" .Release.Name "init-script" }}
type: ClusterIP
---
###########################################
{{- end }}

View File

@@ -0,0 +1,33 @@
# Optionally expose the in-cluster MongoDB to the outside world via an
# ingress-controller TCP ConfigMap plus a LoadBalancer Service.
# All configuration should be done in values.yaml (mongodb.ingress.enabled).
{{- if .Values.mongodb.ingress.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ printf "%s-%s" $.Release.Name "mongo-external" }}
  namespace: {{ .Release.Namespace }}
  labels:
    app.kubernetes.io/part-of: oneuptime
    app.kubernetes.io/managed-by: Helm
data:
  # Maps external port 27017 to <namespace>/<release>-mongodb:27017.
  # Fixes from review: the original format string used a malformed "$s" verb
  # (rendered literally); the value hard-coded the legacy "fi" release name;
  # and ConfigMap data keys must be strings, so the port key is quoted.
  "27017": {{ printf "%s/%s-%s" $.Release.Namespace $.Release.Name "mongodb:27017" | quote }}
---
apiVersion: v1
kind: Service
metadata:
  name: {{ printf "%s-%s" $.Release.Name "mongo-ingress" }}
  namespace: {{ .Release.Namespace }}
  labels:
    app.kubernetes.io/part-of: oneuptime
    app.kubernetes.io/managed-by: Helm
spec:
  type: LoadBalancer
  ports:
    - name: proxied-tcp-27017
      port: 27017
      targetPort: 27017
      protocol: TCP
  selector:
    app: mongodb
{{- end }}
---

View File

@@ -0,0 +1,86 @@
{{- if .Values.haraka.domain -}}
############-----HARAKA----#############################
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ printf "%s-%s" .Release.Name "haraka" }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ printf "%s-%s" .Release.Name "haraka" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app: {{ printf "%s-%s" .Release.Name "haraka" }}
replicas: 1
template:
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "haraka" }}
spec:
containers:
- image: {{ printf "%s/%s/%s:%s" .Values.image.registry .Values.image.repository "haraka" .Values.image.tag }}
name: {{ printf "%s-%s" .Release.Name "haraka" }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
resources:
requests:
cpu: 250m
limits:
cpu: 500m
env:
- name: SMTP_USER
value: {{ .Values.haraka.user }}
- name: SMTP_PASSWORD
value: {{ .Values.haraka.password }}
- name: DOMAIN
value: {{ .Values.haraka.domain }}
- name: DKIM_PRIVATE_KEY
value: {{ .Values.haraka.dkimPrivateKey }}
- name: TLS_CERT
value: {{ .Values.haraka.tlsCert }}
- name: TLS_KEY
value: {{ .Values.haraka.tlsKey }}
ports:
- containerPort: {{ .Values.host.harakaPort }}
hostPort: {{ .Values.host.harakaPort }}
name: {{ printf "%s-%s" .Release.Name "haraka" }}
restartPolicy: {{ .Values.image.restartPolicy }}
---
# OneUptime Haraka Service
apiVersion: v1
kind: Service
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "haraka" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
name: {{ printf "%s-%s" .Release.Name "haraka" }}
namespace: {{ .Release.Namespace }}
spec:
ports:
- port: {{ .Values.host.harakaServicePort }}
targetPort: {{ .Values.host.harakaPort }}
selector:
app: {{ printf "%s-%s" .Release.Name "haraka" }}
type: ClusterIP
---
##########################################################
# The HPA must live INSIDE the haraka.domain guard: the haraka Deployment it
# targets only exists when a domain is configured, so creating the HPA
# unconditionally would leave a dangling autoscaler.
{{- if .Values.autoScaler.enabled }}
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: {{ printf "%s-%s" .Release.Name "haraka" }}
spec:
  maxReplicas: {{ .Values.autoScaler.maxReplicas }}
  minReplicas: {{ .Values.autoScaler.minReplicas }}
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: {{ printf "%s-%s" .Release.Name "haraka" }}
  targetCPUUtilizationPercentage: {{ .Values.autoScaler.averageCpuUtilization }}
---
{{- end }}
{{- end -}}

View File

@@ -0,0 +1,85 @@
####################################
# IMPORTANT: Kube resources below are ONLY for the SaaS service
####################################
{{- if .Values.saas.isSaasService }}
###########-----HELM CHART------#################
# OneUptime HELM CHART
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ printf "%s-%s" .Release.Name "helm-chart" }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ printf "%s-%s" .Release.Name "helm-chart" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app: {{ printf "%s-%s" .Release.Name "helm-chart" }}
replicas: {{ .Values.replicaCount }}
template:
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "helm-chart" }}
spec:
containers:
- image: {{ printf "%s/%s/%s:%s" .Values.image.registry .Values.image.repository "helm-chart" .Values.image.tag }}
name: {{ printf "%s-%s" .Release.Name "helm-chart" }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
resources:
requests:
cpu: 250m
limits:
cpu: 500m
env:
- name: IS_SAAS_SERVICE
value: 'true'
- name: NODE_ENV
value: 'production'
ports:
- containerPort: {{ .Values.host.helmChartPort }}
hostPort: {{ .Values.host.helmChartPort }}
name: {{ printf "%s-%s" .Release.Name "helmchart" }}
restartPolicy: {{ .Values.image.restartPolicy }}
---
apiVersion: v1
kind: Service
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "helm-chart" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
name: {{ printf "%s-%s" .Release.Name "helm-chart" }}
namespace: {{ .Release.Namespace }}
spec:
ports:
- port: {{ .Values.host.helmChartServicePort }}
protocol: TCP
targetPort: {{ .Values.host.helmChartPort }}
selector:
app: {{ printf "%s-%s" .Release.Name "helm-chart" }}
type: ClusterIP
---
###########################################
# The HPA must live INSIDE the saas.isSaasService guard: the helm-chart
# Deployment it targets only exists for the SaaS service, so creating the
# HPA unconditionally would leave a dangling autoscaler on self-hosted
# installs.
{{- if .Values.autoScaler.enabled }}
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: {{ printf "%s-%s" .Release.Name "helm-chart" }}
spec:
  maxReplicas: {{ .Values.autoScaler.maxReplicas }}
  minReplicas: {{ .Values.autoScaler.minReplicas }}
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: {{ printf "%s-%s" .Release.Name "helm-chart" }}
  targetCPUUtilizationPercentage: {{ .Values.autoScaler.averageCpuUtilization }}
---
{{- end }}
{{- end }}

View File

@@ -0,0 +1,88 @@
####################################
# IMPORTANT: Kube resources below are ONLY for the SaaS service
####################################
{{- if .Values.saas.isSaasService }}
###########-----HOME------#################
# OneUptime Home
# Deployment for the SaaS home/marketing page container. This manifest is only
# rendered when .Values.saas.isSaasService is true (see the guard above).
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ printf "%s-%s" .Release.Name "home" }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ printf "%s-%s" .Release.Name "home" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app: {{ printf "%s-%s" .Release.Name "home" }}
replicas: {{ .Values.replicaCount }}
template:
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "home" }}
spec:
containers:
- image: {{ printf "%s/%s/%s:%s" .Values.image.registry .Values.image.repository "home" .Values.image.tag }}
name: {{ printf "%s-%s" .Release.Name "home" }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
resources:
requests:
cpu: 250m
limits:
cpu: 500m
env:
- name: IS_SAAS_SERVICE
value: 'true'
- name: NODE_ENV
# NOTE(review): unquoted template value — if .Values.nodeEnv is unset this
# renders `value:` (null), which the API server rejects for env vars.
# Consider {{ .Values.nodeEnv | quote }}; same applies to the values below.
value: {{ .Values.nodeEnv }}
- name: SENTRY_DSN
value: {{ .Values.sentry.home.dsn }}
- name: ONEUPTIME_HOST
value: {{ .Values.oneuptime.hosts.host1.host }}
ports:
- containerPort: {{ .Values.host.homePort }}
# hostPort binds the port on the node, so at most one replica can run per
# node — presumably intentional for this chart, but worth confirming.
hostPort: {{ .Values.host.homePort }}
name: {{ printf "%s-%s" .Release.Name "home" }}
# NOTE(review): restartPolicy is a Pod-level field; indentation was stripped
# in this view, so confirm this line sits under the pod spec, not the container.
restartPolicy: {{ .Values.image.restartPolicy }}
---
apiVersion: v1
kind: Service
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "home" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
name: {{ printf "%s-%s" .Release.Name "home" }}
namespace: {{ .Release.Namespace }}
spec:
ports:
- port: {{ .Values.host.homeServicePort }}
protocol: TCP
targetPort: {{ .Values.host.homePort }}
selector:
app: {{ printf "%s-%s" .Release.Name "home" }}
type: ClusterIP
---
###########################################
{{- end }}
{{- if .Values.autoScaler.enabled }}
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ printf "%s-%s" .Release.Name "home" }}
spec:
maxReplicas: {{ .Values.autoScaler.maxReplicas }}
minReplicas: {{ .Values.autoScaler.minReplicas }}
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ printf "%s-%s" .Release.Name "home" }}
targetCPUUtilizationPercentage: {{ .Values.autoScaler.averageCpuUtilization }}
---
{{- end }}

View File

@@ -0,0 +1,37 @@
############----TEST-SERVER-INGRESS--#####################################
{{- if .Values.saas.isSaasService }}
{{- range $key, $value := $.Values.httpTestServer.hosts }}
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
annotations:
kubernetes.io/ingress.class: nginx
ingress.kubernetes.io/ssl-redirect: "true"
name: {{ printf "%s-%s-%s" $.Release.Name $key "test" }}
namespace: {{ $.Release.Namespace }}
labels:
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
spec:
{{- if $value.tls.enabled }}
tls:
- hosts:
- {{ $value.host }}
secretName: {{ printf "%s-%s-%s" $.Release.Name $key "testtls" }}
{{- end }}
rules:
{{- if $value.host }}
- host: {{ $value.host }}
http:
{{- else }}
- http:
{{- end }}
paths:
- path: /
backend:
serviceName: {{ printf "%s-%s" $.Release.Name "test" }}
servicePort: 80
---
{{- end }}
{{- end }}
##################################

View File

@@ -0,0 +1,20 @@
{{- if .Values.saas.isSaasService }}
{{- range $key, $value := $.Values.httpTestServer.hosts }}
{{- if $value.tls.enabled }}
apiVersion: v1
kind: Secret
metadata:
name: {{ printf "%s-%s-%s" $.Release.Name $key "testtls" }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
type: Opaque
data:
tls.crt: {{ $value.tls.crt }}
tls.key: {{ $value.tls.key }}
---
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,83 @@
###########-----TEST-SERVER------#################
{{- if .Values.saas.isSaasService }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ printf "%s-%s" .Release.Name "test" }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ printf "%s-%s" .Release.Name "test" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app: {{ printf "%s-%s" .Release.Name "test" }}
replicas: 1
template:
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "test" }}
spec:
containers:
- image: {{ printf "%s/%s/%s:%s" .Values.image.registry .Values.image.repository "http-test-server" .Values.image.tag }}
name: {{ printf "%s-%s" .Release.Name "test" }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
resources:
requests:
cpu: 250m
limits:
cpu: 500m
env:
- name: ONEUPTIME_HOST
value: {{ .Values.oneuptime.hosts.host1.host }}
{{- if .Values.saas.isSaasService }}
- name: IS_SAAS_SERVICE
value: 'true'
{{- end }}
- name: NODE_ENV
value: {{ .Values.nodeEnv }}
- name: SENTRY_DSN
value: {{ .Values.sentry.httpTestServer.dsn }}
ports:
- containerPort: {{ .Values.host.httpTestServerPort }}
hostPort: {{ .Values.host.httpTestServerPort }}
name: {{ printf "%s-%s" .Release.Name "test" }}
restartPolicy: {{ .Values.image.restartPolicy }}
---
apiVersion: v1
kind: Service
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "test" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
name: {{ printf "%s-%s" .Release.Name "test" }}
namespace: {{ .Release.Namespace }}
spec:
ports:
- port: {{ .Values.host.httpTestServerServicePort }}
targetPort: {{ .Values.host.httpTestServerPort }}
selector:
app: {{ printf "%s-%s" .Release.Name "test" }}
type: ClusterIP
---
{{- end }}
########################################
{{- if .Values.autoScaler.enabled }}
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ printf "%s-%s" .Release.Name "test" }}
spec:
maxReplicas: {{ .Values.autoScaler.maxReplicas }}
minReplicas: {{ .Values.autoScaler.minReplicas }}
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ printf "%s-%s" .Release.Name "test" }}
targetCPUUtilizationPercentage: {{ .Values.autoScaler.averageCpuUtilization }}
---
{{- end }}

View File

@@ -0,0 +1,110 @@
############----INGRESS---#####################################
{{- range $key, $value := $.Values.oneuptime.hosts }}
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
labels:
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
meta.helm.sh/release-name: {{ printf "%s" $.Release.Name }}
annotations:
kubernetes.io/ingress.class: nginx
ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/affinity: "cookie"
nginx.ingress.kubernetes.io/affinity-mode: "balanced"
nginx.ingress.kubernetes.io/session-cookie-change-on-failure: "false"
nginx.ingress.kubernetes.io/session-cookie-name: "fi-ingress-cookie"
nginx.ingress.kubernetes.io/session-cookie-expires: "172800"
ingress.kubernetes.io/session-cookie-hash: "sha1"
nginx.ingress.kubernetes.io/session-cookie-max-age: "172800"
name: {{ printf "%s-%s-%s" $.Release.Name $key "ingress" }}
namespace: {{ $.Release.Namespace }}
spec:
{{- if $value.tls.enabled }}
tls:
- hosts:
- {{ $value.host }}
secretName: {{ printf "%s-%s-%s" $.Release.Name $key "tls" }}
{{- end }}
rules:
{{- if $value.host }}
- host: {{ $value.host }}
http:
{{- else }}
- http:
{{- end }}
paths:
- path: /accounts
backend:
serviceName: {{ printf "%s-%s" $.Release.Name "accounts" }}
servicePort: 80
- path: /data-ingestor
backend:
serviceName: {{ printf "%s-%s" $.Release.Name "ingestor" }}
servicePort: 80
- path: /dashboard
backend:
serviceName: {{ printf "%s-%s" $.Release.Name "dashboard" }}
servicePort: 80
- path: /admin
backend:
serviceName: {{ printf "%s-%s" $.Release.Name "admin" }}
servicePort: 80
- path: /api
backend:
serviceName: {{ printf "%s-%s" $.Release.Name "backend" }}
servicePort: 80
- path: /realtime
backend:
serviceName: {{ printf "%s-%s" $.Release.Name "realtime" }}
servicePort: 80
- path: /status-page
backend:
serviceName: {{ printf "%s-%s" $.Release.Name "status" }}
servicePort: 80
- path: /haraka
backend:
serviceName: {{ printf "%s-%s" $.Release.Name "haraka" }}
servicePort: 80
- path: /script
backend:
serviceName: {{ printf "%s-%s" $.Release.Name "script" }}
servicePort: 80
{{- range $probeKey, $probeValue := $.Values.probes }}
- path: {{ printf "/%s" $probeKey }}
backend:
serviceName: {{ printf "%s-%s" $.Release.Name $probeKey }}
servicePort: 80
{{- end }}
{{- if $.Values.saas.isSaasService }}
- path: /
backend:
serviceName: {{ printf "%s-%s" $.Release.Name "home" }}
servicePort: 80
- path: /docs
backend:
serviceName: {{ printf "%s-%s" $.Release.Name "api-docs" }}
servicePort: 80
- path: /license
backend:
serviceName: {{ printf "%s-%s" $.Release.Name "licensing" }}
servicePort: 80
- path: /chart
backend:
serviceName: {{ printf "%s-%s" $.Release.Name "helm-chart" }}
servicePort: 80
{{- else }}
- path: /
backend:
serviceName: {{ printf "%s-%s" $.Release.Name "accounts" }}
servicePort: 80
{{- end }}
{{- if $.Values.saas.exposeInitScriptContainer }}
- path: /
backend:
serviceName: {{ printf "%s-%s" $.Release.Name "init-script" }}
servicePort: 80
{{- end }}
---
{{- end }}
##########################################################################

View File

@@ -0,0 +1,29 @@
########-INIT-SCRIPT-##########
apiVersion: batch/v1
kind: Job
metadata:
name: {{ printf "%s-%s" .Release.Name "init-script" }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
spec:
template:
spec:
containers:
- image: {{ printf "%s/%s/%s:%s" .Values.image.registry .Values.image.repository "init-script" .Values.image.tag }}
name: {{ printf "%s-%s" .Release.Name "init-script" }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
env:
- name: MONGO_URL
value: {{ template "oneuptime.mongodbConnectionString" . }}
- name: NODE_ENV
value: 'production'
- name: ENCRYPTION_KEY
valueFrom:
configMapKeyRef:
name: {{ printf "%s-%s" .Release.Name "configmap" }}
key: encryptionkey
restartPolicy: Never
---
####################################

View File

@@ -0,0 +1,90 @@
####################################
#IMPORTANT: Kube Resources below are ONLY for SaaS service
####################################
{{- if .Values.saas.isSaasService }}
###########-----LICENSING------##############
# Licensing
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ printf "%s-%s" .Release.Name "licensing" }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ printf "%s-%s" .Release.Name "licensing" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app: {{ printf "%s-%s" .Release.Name "licensing" }}
replicas: 1
template:
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "licensing" }}
spec:
containers:
- image: {{ printf "%s/%s/%s:%s" .Values.image.registry .Values.image.repository "licensing" .Values.image.tag }}
name: {{ printf "%s-%s" .Release.Name "licensing" }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
resources:
requests:
cpu: 250m
limits:
cpu: 500m
env:
- name: IS_SAAS_SERVICE
value: 'true'
- name: AIRTABLE_API_KEY
value: {{ .Values.saas.licensing.airtable.key }}
- name: AIRTABLE_BASE_ID
value: {{ .Values.saas.licensing.airtable.baseId }}
- name: TOKEN_SECRET
value: {{ .Values.saas.licensing.tokenSecret }}
- name: NODE_ENV
value: {{ .Values.nodeEnv }}
- name: SENTRY_DSN
value: {{ .Values.sentry.licensing.dsn }}
ports:
- containerPort: {{ .Values.host.licensingPort }}
hostPort: {{ .Values.host.licensingPort }}
name: {{ printf "%s-%s" .Release.Name "licensing" }}
restartPolicy: {{ .Values.image.restartPolicy }}
---
# Service exposing the SaaS licensing Deployment inside the cluster.
apiVersion: v1
kind: Service
metadata:
  labels:
    app: {{ printf "%s-%s" .Release.Name "licensing" }}
    app.kubernetes.io/part-of: oneuptime
    app.kubernetes.io/managed-by: Helm
  name: {{ printf "%s-%s" .Release.Name "licensing" }}
  # Explicit namespace added for consistency with every other Service in this
  # chart; without it the Service is created in the kubectl/helm default
  # namespace rather than the release namespace on `helm install --namespace`.
  namespace: {{ .Release.Namespace }}
spec:
  ports:
    - port: {{ .Values.host.licensingServicePort }}
      protocol: TCP
      targetPort: {{ .Values.host.licensingPort }}
  selector:
    app: {{ printf "%s-%s" .Release.Name "licensing" }}
  type: ClusterIP
---
{{- end }}
{{- if .Values.autoScaler.enabled }}
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ printf "%s-%s" .Release.Name "licensing" }}
spec:
maxReplicas: {{ .Values.autoScaler.maxReplicas }}
minReplicas: {{ .Values.autoScaler.minReplicas }}
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ printf "%s-%s" .Release.Name "licensing" }}
targetCPUUtilizationPercentage: {{ .Values.autoScaler.averageCpuUtilization }}
---
{{- end }}

View File

@@ -0,0 +1,88 @@
#######-------- LIGHTHOUSE RUNNER --------#########
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ printf "%s-%s" .Release.Name "lighthouse" }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ printf "%s-%s" .Release.Name "lighthouse" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app: {{ printf "%s-%s" .Release.Name "lighthouse" }}
replicas: 1
template:
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "lighthouse" }}
spec:
containers:
- image: {{ printf "%s/%s/%s:%s" .Values.image.registry .Values.image.repository "lighthouse-runner" .Values.image.tag }}
name: {{ printf "%s-%s" .Release.Name "lighthouse" }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
resources:
requests:
cpu: 250m
limits:
cpu: 500m
env:
- name: NODE_ENV
value: {{ .Values.nodeEnv }}
- name: SENTRY_DSN
value: {{ .Values.sentry.lighthouse.dsn }}
- name: PORT
value: {{ .Values.host.lighthouseRunnerPort | quote }}
- name: SERVER_URL
value: {{ template "oneuptime.serverUrl" $ }}
- name: IS_SAAS_SERVICE
value: 'true'
- name: CLUSTER_KEY
valueFrom:
configMapKeyRef:
name: {{ printf "%s-%s" $.Release.Name "configmap" }}
key: clusterkey
ports:
- containerPort: {{ .Values.host.lighthouseRunnerPort }}
hostPort: {{ .Values.host.lighthouseRunnerPort }}
name: {{ printf "%s-%s" .Release.Name "lighthouse" }}
restartPolicy: {{ .Values.image.restartPolicy }}
---
# OneUptime Lighthouse Runner Service
# (Comment fixed: this manifest is the Service fronting the lighthouse-runner
# Deployment defined above, not a second Deployment.)
apiVersion: v1
kind: Service
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "lighthouse" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
name: {{ printf "%s-%s" .Release.Name "lighthouse" }}
namespace: {{ .Release.Namespace }}
spec:
ports:
- port: {{ .Values.host.lighthouseRunnerServicePort }}
targetPort: {{ .Values.host.lighthouseRunnerPort }}
selector:
app: {{ printf "%s-%s" .Release.Name "lighthouse" }}
type: ClusterIP
---
###################################
{{- if .Values.autoScaler.enabled }}
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ printf "%s-%s" .Release.Name "lighthouse" }}
spec:
maxReplicas: {{ .Values.autoScaler.maxReplicas }}
minReplicas: {{ .Values.autoScaler.minReplicas }}
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ printf "%s-%s" .Release.Name "lighthouse" }}
targetCPUUtilizationPercentage: {{ .Values.autoScaler.averageCpuUtilization }}
---
{{- end }}

View File

@@ -0,0 +1,195 @@
# Default configuration for a metric server
# https://github.com/kubernetes-sigs/metrics-server
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: system:aggregated-metrics-reader
rules:
- apiGroups:
- metrics.k8s.io
resources:
- pods
- nodes
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
rules:
- apiGroups:
- ""
resources:
- pods
- nodes
- nodes/stats
- namespaces
- configmaps
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server-auth-reader
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: metrics-server:system:auth-delegator
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: metrics-server
name: system:metrics-server
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:metrics-server
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
k8s-app: metrics-server
name: metrics-server
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: metrics-server
strategy:
rollingUpdate:
maxUnavailable: 0
template:
metadata:
labels:
k8s-app: metrics-server
spec:
containers:
- args:
- --cert-dir=/tmp
- --secure-port=443
- --kubelet-preferred-address-types=Hostname,InternalDNS,InternalIP,ExternalDNS,ExternalIP
- --kubelet-use-node-status-port
- --metric-resolution=15s
image: k8s.gcr.io/metrics-server/metrics-server:v0.5.0
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /livez
port: https
scheme: HTTPS
periodSeconds: 10
name: metrics-server
ports:
- containerPort: 443
name: https
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /readyz
port: https
scheme: HTTPS
initialDelaySeconds: 20
periodSeconds: 10
resources:
requests:
cpu: 100m
memory: 200Mi
securityContext:
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
volumeMounts:
- mountPath: /tmp
name: tmp-dir
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
volumes:
- emptyDir: {}
name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
labels:
k8s-app: metrics-server
name: v1beta1.metrics.k8s.io
spec:
group: metrics.k8s.io
groupPriorityMinimum: 100
insecureSkipTLSVerify: true
service:
name: metrics-server
namespace: kube-system
version: v1beta1
versionPriority: 100

View File

@@ -0,0 +1,93 @@
########-PROBE-#################
# Probe service and deployment
{{- range $key, $value := $.Values.probes }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ printf "%s-%s" $.Release.Name $key }}
namespace: {{ $.Release.Namespace }}
labels:
app: {{ printf "%s-%s" $.Release.Name $key }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app: {{ printf "%s-%s" $.Release.Name $key }}
replicas: 1 # THIS SHOULD STRICTLY BE 1
template:
metadata:
labels:
app: {{ printf "%s-%s" $.Release.Name $key }}
spec:
containers:
- image: {{ printf "%s/%s/%s:%s" $.Values.image.registry $.Values.image.repository "probe" $.Values.image.tag }}
name: {{ printf "%s-%s" $.Release.Name $key }}
imagePullPolicy: {{ $.Values.image.pullPolicy }}
resources:
requests:
cpu: 250m
limits:
cpu: 500m
env:
- name: SERVER_URL
value: {{ template "oneuptime.serverUrl" $ }}
- name: DATA_INGESTOR_URL
value: {{ template "oneuptime.dataIngestorUrl" $ }}
- name: PORT
value: {{ $value.port | quote }}
- name: PROBE_NAME
value: {{ $value.name }}
- name: PROBE_KEY
value: {{ $value.key }}
{{- if $.Values.newRelic.probe.licenseKey }}
- name: NEW_RELIC_LICENSE_KEY
value: {{ $.Values.newRelic.probe.licenseKey }}
{{- end }}
{{- if $.Values.newRelic.probe.appName }}
- name: NEW_RELIC_APPLICATION_NAME
value: {{ $.Values.newRelic.probe.appName}}
{{- end }}
- name: CLUSTER_KEY
valueFrom:
configMapKeyRef:
name: {{ printf "%s-%s" $.Release.Name "configmap" }}
key: clusterkey
{{- if $.Values.saas.isSaasService }}
- name: IS_SAAS_SERVICE
value: 'true'
- name: SLACK_ERROR_LOG_WEBHOOK
value: {{ $.Values.saas.slackErrorLog.webhook }}
- name: SLACK_ERROR_LOG_CHANNEL
value: {{ $.Values.saas.slackErrorLog.channel }}
{{- end }}
- name: NODE_ENV
value: {{ $.Values.nodeEnv }}
- name: SENTRY_DSN
value: {{ $.Values.sentry.probe.dsn }}
ports:
- containerPort: {{ $value.port }}
hostPort: {{ $value.port }}
name: {{ printf "%s-%s" $.Release.Name $key }}
restartPolicy: {{ $.Values.image.restartPolicy }}
---
apiVersion: v1
kind: Service
metadata:
labels:
app: {{ printf "%s-%s" $.Release.Name $key }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
name: {{ printf "%s-%s" $.Release.Name $key }}
namespace: {{ $.Release.Namespace }}
spec:
ports:
- port: {{ $value.servicePort }}
targetPort: {{ $value.port }}
selector:
app: {{ printf "%s-%s" $.Release.Name $key }}
type: ClusterIP
---
###########################
{{- end }}

View File

@@ -0,0 +1,83 @@
########-REALTIME-#################
# Realtime service and deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ printf "%s-%s" .Release.Name "realtime" }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ printf "%s-%s" .Release.Name "realtime" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app: {{ printf "%s-%s" .Release.Name "realtime" }}
replicas: {{ .Values.replicaCount }}
template:
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "realtime" }}
spec:
containers:
- image: {{ printf "%s/%s/%s:%s" .Values.image.registry .Values.image.repository "realtime" .Values.image.tag }}
name: {{ printf "%s-%s" .Release.Name "realtime" }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
resources:
requests:
cpu: 450m
limits:
cpu: 700m
env:
- name: PORT
value: {{ .Values.host.realtimePort | quote }}
- name: CLUSTER_KEY
valueFrom:
configMapKeyRef:
name: {{ printf "%s-%s" .Release.Name "configmap" }}
key: clusterkey
- name: NODE_ENV
value: {{ .Values.nodeEnv }}
- name: SENTRY_DSN
value: {{ .Values.sentry.realtime.dsn }}
ports:
- containerPort: {{ .Values.host.realtimePort }}
hostPort: {{ .Values.host.realtimePort }}
name: {{ printf "%s-%s" .Release.Name "realtime" }}
restartPolicy: {{ .Values.image.restartPolicy }}
---
apiVersion: v1
kind: Service
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "realtime" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
name: {{ printf "%s-%s" .Release.Name "realtime" }}
namespace: {{ .Release.Namespace }}
spec:
ports:
- port: {{ .Values.host.realtimeServicePort }}
targetPort: {{ .Values.host.realtimePort }}
selector:
app: {{ printf "%s-%s" .Release.Name "realtime" }}
type: ClusterIP
---
###########################
{{- if .Values.autoScaler.enabled }}
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ printf "%s-%s" .Release.Name "realtime" }}
spec:
maxReplicas: {{ .Values.autoScaler.maxReplicas }}
minReplicas: {{ .Values.autoScaler.minReplicas }}
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ printf "%s-%s" .Release.Name "realtime" }}
targetCPUUtilizationPercentage: {{ .Values.autoScaler.averageCpuUtilization }}
---
{{- end }}

View File

@@ -0,0 +1,87 @@
#######------SCRIPT RUNNER--------##########
# OneUptime Script Runner Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ printf "%s-%s" .Release.Name "script" }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ printf "%s-%s" .Release.Name "script" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app: {{ printf "%s-%s" .Release.Name "script" }}
replicas: 1
template:
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "script" }}
spec:
containers:
- image: {{ printf "%s/%s/%s:%s" .Values.image.registry .Values.image.repository "script-runner" .Values.image.tag }}
name: {{ printf "%s-%s" .Release.Name "script" }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
resources:
requests:
cpu: 250m
limits:
cpu: 500m
env:
- name: NODE_ENV
value: {{ .Values.nodeEnv }}
- name: SENTRY_DSN
value: {{ .Values.sentry.scriptRunner.dsn }}
- name: SERVER_URL
value: {{ template "oneuptime.serverUrl" $ }}
- name: IS_SAAS_SERVICE
value: 'true'
- name: CLUSTER_KEY
valueFrom:
configMapKeyRef:
name: {{ printf "%s-%s" $.Release.Name "configmap" }}
key: clusterkey
ports:
- containerPort: {{ .Values.host.scriptRunnerPort }}
hostPort: {{ .Values.host.scriptRunnerPort }}
name: {{ printf "%s-%s" .Release.Name "script" }}
restartPolicy: {{ .Values.image.restartPolicy }}
---
# OneUptime Script Runner Service
# (Comment fixed: this manifest is the Service for the script-runner
# Deployment above, not a second Deployment.)
apiVersion: v1
kind: Service
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "script" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
name: {{ printf "%s-%s" .Release.Name "script" }}
namespace: {{ .Release.Namespace }}
spec:
ports:
# NOTE(review): "scripRunnerServicePort" looks like a typo of
# "scriptRunnerServicePort" (compare targetPort below). Confirm which key
# values.yaml actually defines — a missing key renders an empty port here.
- port: {{ .Values.host.scripRunnerServicePort }}
targetPort: {{ .Values.host.scriptRunnerPort }}
selector:
app: {{ printf "%s-%s" .Release.Name "script" }}
type: ClusterIP
---
###################################
{{- if .Values.autoScaler.enabled }}
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ printf "%s-%s" .Release.Name "script" }}
spec:
maxReplicas: {{ .Values.autoScaler.maxReplicas }}
minReplicas: {{ .Values.autoScaler.minReplicas }}
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ printf "%s-%s" .Release.Name "script" }}
targetCPUUtilizationPercentage: {{ .Values.autoScaler.averageCpuUtilization }}
---
{{- end }}

View File

@@ -0,0 +1,104 @@
###########-----STATUS PAGE------#################
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ printf "%s-%s" .Release.Name "status" }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ printf "%s-%s" .Release.Name "status" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
spec:
selector:
matchLabels:
app: {{ printf "%s-%s" .Release.Name "status" }}
replicas: {{ .Values.replicaCount }}
template:
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "status" }}
spec:
containers:
- image: {{ printf "%s/%s/%s:%s" .Values.image.registry .Values.image.repository "status-page" .Values.image.tag }}
name: {{ printf "%s-%s" .Release.Name "status" }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
resources:
requests:
cpu: 250m
limits:
cpu: 500m
env:
- name: ONEUPTIME_HOST
value: {{ .Values.oneuptime.hosts.host1.host }}
- name: NODE_ENV
value: {{ .Values.nodeEnv }}
- name: SENTRY_DSN
value: {{ .Values.sentry.statusPage.dsn }}
- name: BACKEND_URL
value: {{ template "oneuptime.backendHost" . }}
- name: MONGO_URL
value: {{ template "oneuptime.mongodbConnectionString" . }}
{{- if .Values.saas.isSaasService }}
- name: IS_SAAS_SERVICE
value: 'true'
- name: STATUSPAGE_CERT
value: {{ .Values.statusPage.cert }}
- name: STATUSPAGE_PRIVATEKEY
value: {{ .Values.statusPage.key }}
{{- end }}
{{- if .Values.backendProtocol }}
- name: BACKEND_PROTOCOL
value: {{ .Values.backendProtocol }}
{{- end }}
ports:
- containerPort: {{ .Values.host.statusPageHttpPort }}
hostPort: {{ .Values.host.statusPageHttpPort }}
name: {{ printf "%s-%s" .Release.Name "status-http" }}
- containerPort: {{ .Values.host.statusPageHttpsPort }}
hostPort: {{ .Values.host.statusPageHttpsPort }}
name: {{ printf "%s-%s" .Release.Name "status-https" }}
restartPolicy: {{ .Values.image.restartPolicy }}
---
apiVersion: v1
kind: Service
metadata:
labels:
app: {{ printf "%s-%s" .Release.Name "status" }}
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
name: {{ printf "%s-%s" .Release.Name "status" }}
namespace: {{ .Release.Namespace }}
spec:
ports:
- port: {{ .Values.host.statusPageHttpServicePort }}
name: "http"
targetPort: {{ .Values.host.statusPageHttpPort }}
- port: {{ .Values.host.statusPageHttpsServicePort }}
name: "https"
targetPort: {{ .Values.host.statusPageHttpsPort }}
selector:
app: {{ printf "%s-%s" .Release.Name "status" }}
{{- if .Values.saas.isSaasService }}
# LoadBalancer because we need to expose the status page on different domains.
type: LoadBalancer
{{- else}}
type: ClusterIP
{{- end }}
---
########################################
{{- if .Values.autoScaler.enabled }}
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: {{ printf "%s-%s" .Release.Name "status" }}
spec:
maxReplicas: {{ .Values.autoScaler.maxReplicas }}
minReplicas: {{ .Values.autoScaler.minReplicas }}
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ printf "%s-%s" .Release.Name "status" }}
targetCPUUtilizationPercentage: {{ .Values.autoScaler.averageCpuUtilization }}
---
{{- end }}

View File

@@ -0,0 +1,44 @@
############----STATUS-PAGE-INGRESS--#####################################
{{- if .Values.oneuptime.hosts.host1.host }}
{{- range $key, $value := $.Values.statusPage.hosts }}
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
annotations:
kubernetes.io/ingress.class: nginx
ingress.kubernetes.io/ssl-redirect: "true"
nginx.ingress.kubernetes.io/affinity: "cookie"
nginx.ingress.kubernetes.io/affinity-mode: "balanced"
nginx.ingress.kubernetes.io/session-cookie-change-on-failure: "false"
nginx.ingress.kubernetes.io/session-cookie-name: "fi-ingress-cookie"
nginx.ingress.kubernetes.io/session-cookie-expires: "172800"
ingress.kubernetes.io/session-cookie-hash: "sha1"
nginx.ingress.kubernetes.io/session-cookie-max-age: "172800"
name: {{ printf "%s-%s-%s" $.Release.Name $key "statuspage" }}
namespace: {{ $.Release.Namespace }}
labels:
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
spec:
{{- if $value.tls.enabled }}
tls:
- hosts:
- {{ $value.host }}
secretName: {{ printf "%s-%s-%s" $.Release.Name $key "statuspagetls" }}
{{- end }}
rules:
{{- if $value.host }}
- host: {{ $value.host }}
http:
{{- else }}
- http:
{{- end }}
paths:
- path: /
backend:
serviceName: {{ printf "%s-%s" $.Release.Name "status" }}
servicePort: 80
---
{{- end }}
{{- end }}
##################################

View File

@@ -0,0 +1,18 @@
{{- range $key, $value := $.Values.statusPage.hosts }}
{{- if $value.tls.enabled }}
apiVersion: v1
kind: Secret
metadata:
name: {{ printf "%s-%s-%s" $.Release.Name $key "statuspagetls" }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
type: Opaque
data:
tls.crt: {{ $value.tls.crt }}
tls.key: {{ $value.tls.key }}
---
{{- end }}
{{- end }}

View File

@@ -0,0 +1,17 @@
{{- range $key, $value := $.Values.oneuptime.hosts }}
{{- if $value.tls.enabled }}
apiVersion: v1
kind: Secret
metadata:
name: {{ printf "%s-%s-%s" $.Release.Name $key "tls" }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/part-of: oneuptime
app.kubernetes.io/managed-by: Helm
type: Opaque
data:
tls.crt: {{ $value.tls.crt }}
tls.key: {{ $value.tls.key }}
---
{{- end }}
{{- end }}

View File

@@ -0,0 +1,328 @@
##################################################################################
## Important: If you're implementing this in an enterprise environment, this should always be `false`.
## This is for the OneUptime SaaS service. This will deploy all the SaaS env vars.
##################################################################################
saas:
  exposeInitScriptContainer: false
  isSaasService: false
  stripe:
    publicKey: # Stripe public and private key.
    privateKey:
  airtable:
    key: # Airtable key to store leads.
    baseId:
  amplitude:
    key: # Amplitude for tracking.
  twitter:
    bearertoken: # Twitter Bearer Token.
  licensing:
    airtable: # Airtable for validating licenses.
      key:
      baseId:
    tokenSecret: # Encrypting license with tokens to send them to clients.
###################################################################################
statusPage:
  hosts:
    host1:
      host:
      tls:
        enabled: false
        crt:
        key:
      # NOTE(review): cert/key appear to duplicate tls.crt/tls.key for custom
      # domains — confirm which pair the templates actually consume.
      cert: # certificate for a custom domain
      key: # private key for a custom domain
##################################################################################
## Important: OneUptime Values. More information in the Readme.md
##################################################################################
oneuptime:
  # Admin credentials; required when signup is disabled (see disableSignup).
  admin:
    email:
    password:
  hosts:
    host1:
      host:
      tls:
        enabled: false
        crt:
        key:
  licensingUrl: https://oneuptime.com/license
###################################################################################
httpTestServer:
  hosts:
    host1:
      host:
      tls:
        enabled: false
        crt:
        key:
##################################################################################
## Important: Probe Values. More information in the Readme.md
##################################################################################
probes:
  probe1:
    port: 3024
    name: Probe 1
    key: sample-key
    servicePort: 80
  probe2:
    name: Probe 2
    port: 3025
    key: sample-key
    servicePort: 80
##################################################################################
## Important: RateLimiter Values. More information in the Readme.md
##################################################################################
rateLimitter:
  enabled: false
  requestLimit: 5000
  # NOTE(review): 216000 ms is 3.6 minutes — confirm the intended window.
  requestLimitTimePeriodInMS: 216000
## OneUptime official image version on Docker Hub
## ref: https://hub.docker.com/u/oneuptimeproject
##
image:
  registry: docker.io
  repository: oneuptimeproject
  tag: latest
  pullPolicy: Always
restartPolicy: Always
replicaCount: 1
##################################################################################
## IMPORTANT:
## Values for Subcharts
##
redis:
  redisPort: 6379
  image:
    registry: docker.io
    repository: bitnami/redis
    tag: latest
    pullPolicy: Always
  usePassword: false
  auth:
    enabled: false
  persistence:
    enabled: true
    mountPath: /bitnami/redis
    size: 20Gi
mongodb:
  architecture: replicaset
  auth:
    enabled: true
    rootPassword: root
    username: oneuptime
    password: password
    database: oneuptimedb
    replicaSetKey: mongodboneuptime
  replicaSetName: rs0
  replicaCount: 2
  replicaSetHostnames: true
  arbiter:
    enabled: true
  image:
    registry: docker.io
    repository: bitnami/mongodb
    tag: latest
    pullPolicy: Always
  # Uncomment this field to enable external access
  # to the mongodb database.
  # If mongodb is failing to initialise, comment out the variables below
  # {externalAccess, serviceAccount, rbac}, then reinstall the project.
  # Once deployed successfully, uncomment the variables and upgrade the project.
  externalAccess:
    enabled: true
    service:
      type: LoadBalancer
      port: 27017
    autoDiscovery:
      enabled: true
  serviceAccount:
    create: true
  rbac:
    create: true
  # end of external access selectors
  persistence:
    enabled: true
    mountPath: /bitnami/mongodb
    size: 1000Gi
  useStatefulSet: true
ingress:
  enabled: false
## Service Configuration
## For minikube, set service.type to NodePort, elsewhere use LoadBalancer
##
nginx-ingress-controller:
  defaultBackend:
    enabled: false
  service:
    type: LoadBalancer
  # Forward the original client address to the upstream services.
  proxySetHeaders:
    X-Forwarded-For: "$http_x_forwarded_for"
    X-Real-Ip: "$http_x_forwarded_for"
  publishService:
    enabled: true
  config:
    log-format-upstream: '$remote_addr - $http_cf_connecting_ip - $http_x_forwarded_for - $request_id - [$proxy_add_x_forwarded_for] - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $request_length $request_time [$proxy_upstream_name] $upstream_addr $upstream_response_length $upstream_response_time $upstream_status'
    proxy-protocol: "true"
    real-ip-header: "X-Forwarded-For"
##################################################################################
haraka:
  fromEmail:
  fromName:
  user:
  password:
  domain:
  # This should be base64 of your private key
  dkimPrivateKey:
  tlsCert:
  tlsKey:
  host:
# Container ports for each OneUptime service.
backendPort: 3002
dataIngestorPort: 3200
realtimePort: 3300
homePort: 1444
licensingPort: 3004
statusPageHttpPort: 3006
statusPageHttpsPort: 3007
dashboardPort: 3000
accountsPort: 3003
helmChartPort: 3423
apiDocsPort: 1445
initScriptPort: 1447
# If port 25 is available and accessible,
# then we can use it as the default port.
harakaPort: 2525
httpTestServerPort: 3010
adminDashboardPort: 3100
# Kubernetes Service ports fronting the container ports above.
backendServicePort: 80
dataIngestorServicePort: 80
realtimeServicePort: 80
homeServicePort: 80
apiDocsServicePort: 80
statusPageHttpServicePort: 80
statusPageHttpsServicePort: 443
dashboardServicePort: 80
accountsServicePort: 80
adminDashboardServicePort: 80
backendNodePort: 80
statusPageNodePort: 80
dashboardNodePort: 80
accountsNodePort: 80
adminDashboardNodePort: 80
licensingServicePort: 80
helmChartServicePort: 80
httpTestServerServicePort: 80
initScriptServicePort: 80
harakaServicePort: 2525
scriptRunnerPort: 3009
# (sic) key-name typo preserved — templates elsewhere reference this spelling.
scripRunnerServicePort: 80
applicationScannerPort: 3005
applicationScannerServicePort: 80
containerScannerPort: 3055
containerScannerServicePort: 80
lighthouseRunnerPort: 3015
lighthouseRunnerServicePort: 80
isThirdPartyBilling: false
isRunningOnGCPMarketplace: false
isTesting: false
# Encryption key — must be exactly 32 characters long.
encryptionKey: ThisEncryptionKeyLengthIs32Size.
# If you disable sign up, then you need to specify
# oneuptime.admin.email and oneuptime.admin.password values for the admin account.
disableSignup: false
# What protocol is backend running on. Takes in values like 'http:' or 'https:'
backendProtocol:
# Status page CNAME target which customers will set on the
# custom domain tab of the status page.
statusPageDomain:
# Push notifications.
# Generate a public and private key pair for push notifications.
# These are just test keys.
# You can generate them by first installing the web-push npm package
# (npm install web-push -g), then running: web-push generate-vapid-keys
pushNotification:
  publicKey: "BD1kb-OchZlXr32bmwpjhoxp_cq-aqK4dWXRDkC5m6Hd9_cvMOUw_bXRFR3pJFGzpEdjQUk5SDdYaXvb7xd-1Dg"
  privateKey: "WdFZTeXkuoxpsO_KNOtXvhDUc_Ae1rb-WjPv6AVexA4"
  url: "https://oneuptime-test.com"
autoScaler:
  enabled: false
  averageCpuUtilization: 50
  minReplicas: 1
  maxReplicas: 1
newRelic:
  backend:
    licenseKey:
    appName:
  probe:
    licenseKey:
    appName:
nodeEnv: development
# Sentry DSNs, one per service; leave empty to disable error reporting.
sentry:
  backend:
    dsn:
  dashboard:
    dsn:
  accounts:
    dsn:
  adminDashboard:
    dsn:
  apiDocs:
    dsn:
  applicationScanner:
    dsn:
  containerScanner:
    dsn:
  dataIngestor:
    dsn:
  home:
    dsn:
  httpTestServer:
    dsn:
  licensing:
    dsn:
  lighthouse:
    dsn:
  probe:
    dsn:
  realtime:
    dsn:
  scriptRunner:
    dsn:
  statusPage:
    dsn: