refactor: remove LLM-related Docker workflows and associated files

This commit is contained in:
Nawaz Dhandala
2025-12-04 13:47:12 +00:00
parent 30a3c5e1b2
commit 270199806c
8 changed files with 3 additions and 649 deletions

View File

@@ -1647,101 +1647,6 @@ jobs:
run: bash ./Scripts/NPM/PublishAllPackages.sh
llm-docker-image-deploy:
needs: [generate-build-number, read-version]
runs-on: ubuntu-latest
steps:
# Docker compose needs a lot of space to build images, so we need to free up some space first in the GitHub Actions runner
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:
# this might remove tools that are actually needed,
# if set to "true" but frees about 6 GB
tool-cache: false
android: true
dotnet: true
haskell: true
large-packages: true
docker-images: true
swap-storage: true
- name: Docker Meta
id: meta
uses: docker/metadata-action@v4
with:
images: |
oneuptime/llm
ghcr.io/oneuptime/llm
tags: |
type=raw,value=release,enable=true
type=semver,value=${{needs.read-version.outputs.major_minor}},pattern={{version}},enable=true
- uses: actions/checkout@v4
with:
ref: ${{ github.ref }}
- uses: actions/setup-node@v4
with:
node-version: latest
# - name: Setup Git LFS
# run: git lfs install
# # Cannot do this, no space on the GitHub standard runner. We need to use the large runner which is self-hosted
# - name: Download the Model from Hugging Face
# run: mkdir -p ./LLM/Models && cd ./LLM/Models && git clone https://${{ secrets.HUGGING_FACE_USERNAME }}:${{ secrets.HUGGING_FACE_PASSWORD }}@huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
with:
image: tonistiigi/binfmt:qemu-v10.0.4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Generate Dockerfile from Dockerfile.tpl
uses: nick-fields/retry@v3
with:
timeout_minutes: 10
max_attempts: 3
command: npm run prerun
# Build and deploy nginx.
- name: Login to Docker Hub
uses: nick-fields/retry@v3
with:
timeout_minutes: 5
max_attempts: 3
command: |
echo "${{ secrets.DOCKERHUB_PASSWORD }}" | docker login --username "${{ secrets.DOCKERHUB_USERNAME }}" --password-stdin
- name: Login to GitHub Container Registry
uses: nick-fields/retry@v3
with:
timeout_minutes: 5
max_attempts: 3
command: |
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username "${{ github.repository_owner }}" --password-stdin
- name: Build and push
uses: nick-fields/retry@v3
with:
timeout_minutes: 45
max_attempts: 3
command: |
bash ./Scripts/GHA/build_docker_images.sh \
--image llm \
--version "${{needs.read-version.outputs.major_minor}}" \
--dockerfile ./LLM/Dockerfile \
--context ./LLM \
--platforms linux/amd64 \
--git-sha "${{ github.sha }}"
docs-docker-image-deploy:
needs: [generate-build-number, read-version]
runs-on: ubuntu-latest
@@ -2131,7 +2036,6 @@ jobs:
- app-docker-image-deploy
- copilot-docker-image-deploy
- accounts-docker-image-deploy
- llm-docker-image-deploy
- docs-docker-image-deploy
- worker-docker-image-deploy
- workflow-docker-image-deploy
@@ -2162,7 +2066,6 @@ jobs:
"app",
"copilot",
"accounts",
"llm",
"docs",
"worker",
"workflow",
@@ -2221,7 +2124,7 @@ jobs:
test-e2e-release-saas:
runs-on: ubuntu-latest
needs: [telemetry-docker-image-deploy, publish-mcp-server, copilot-docker-image-deploy, docs-docker-image-deploy, api-reference-docker-image-deploy, workflow-docker-image-deploy, llm-docker-image-deploy, accounts-docker-image-deploy, admin-dashboard-docker-image-deploy, app-docker-image-deploy, dashboard-docker-image-deploy, probe-ingest-docker-image-deploy, server-monitor-ingest-docker-image-deploy, isolated-vm-docker-image-deploy, home-docker-image-deploy, worker-docker-image-deploy, otel-collector-docker-image-deploy, probe-docker-image-deploy, status-page-docker-image-deploy, test-docker-image-deploy, test-server-docker-image-deploy, publish-npm-packages, e2e-docker-image-deploy, helm-chart-deploy, generate-build-number, read-version, nginx-docker-image-deploy, incoming-request-ingest-docker-image-deploy]
needs: [telemetry-docker-image-deploy, publish-mcp-server, copilot-docker-image-deploy, docs-docker-image-deploy, api-reference-docker-image-deploy, workflow-docker-image-deploy, accounts-docker-image-deploy, admin-dashboard-docker-image-deploy, app-docker-image-deploy, dashboard-docker-image-deploy, probe-ingest-docker-image-deploy, server-monitor-ingest-docker-image-deploy, isolated-vm-docker-image-deploy, home-docker-image-deploy, worker-docker-image-deploy, otel-collector-docker-image-deploy, probe-docker-image-deploy, status-page-docker-image-deploy, test-docker-image-deploy, test-server-docker-image-deploy, publish-npm-packages, e2e-docker-image-deploy, helm-chart-deploy, generate-build-number, read-version, nginx-docker-image-deploy, incoming-request-ingest-docker-image-deploy]
env:
CI_PIPELINE_ID: ${{github.run_number}}
steps:
@@ -2309,7 +2212,7 @@ jobs:
test-e2e-release-self-hosted:
runs-on: ubuntu-latest
# After all the jobs runs
needs: [telemetry-docker-image-deploy, publish-mcp-server, copilot-docker-image-deploy, incoming-request-ingest-docker-image-deploy, docs-docker-image-deploy, api-reference-docker-image-deploy, workflow-docker-image-deploy, llm-docker-image-deploy, accounts-docker-image-deploy, admin-dashboard-docker-image-deploy, app-docker-image-deploy, dashboard-docker-image-deploy, probe-ingest-docker-image-deploy, server-monitor-ingest-docker-image-deploy, isolated-vm-docker-image-deploy, home-docker-image-deploy, worker-docker-image-deploy, otel-collector-docker-image-deploy, probe-docker-image-deploy, status-page-docker-image-deploy, test-docker-image-deploy, test-server-docker-image-deploy, publish-npm-packages, e2e-docker-image-deploy, helm-chart-deploy, generate-build-number, read-version, nginx-docker-image-deploy]
needs: [telemetry-docker-image-deploy, publish-mcp-server, copilot-docker-image-deploy, incoming-request-ingest-docker-image-deploy, docs-docker-image-deploy, api-reference-docker-image-deploy, workflow-docker-image-deploy, accounts-docker-image-deploy, admin-dashboard-docker-image-deploy, app-docker-image-deploy, dashboard-docker-image-deploy, probe-ingest-docker-image-deploy, server-monitor-ingest-docker-image-deploy, isolated-vm-docker-image-deploy, home-docker-image-deploy, worker-docker-image-deploy, otel-collector-docker-image-deploy, probe-docker-image-deploy, status-page-docker-image-deploy, test-docker-image-deploy, test-server-docker-image-deploy, publish-npm-packages, e2e-docker-image-deploy, helm-chart-deploy, generate-build-number, read-version, nginx-docker-image-deploy]
env:
CI_PIPELINE_ID: ${{github.run_number}}
steps:

View File

@@ -207,103 +207,6 @@ jobs:
llm-docker-image-deploy:
needs: [read-version, generate-build-number]
runs-on: ubuntu-latest
steps:
# Docker compose needs a lot of space to build images, so we need to free up some space first in the GitHub Actions runner
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:
# this might remove tools that are actually needed,
# if set to "true" but frees about 6 GB
tool-cache: false
android: true
dotnet: true
haskell: true
large-packages: true
docker-images: true
swap-storage: true
- name: Docker Meta
id: meta
uses: docker/metadata-action@v4
with:
images: |
oneuptime/llm
ghcr.io/oneuptime/llm
tags: |
type=raw,value=test,enable=true
type=raw,value=${{needs.read-version.outputs.major_minor}}-test,enable=true
- uses: actions/checkout@v4
with:
ref: ${{ github.ref }}
- uses: actions/setup-node@v4
with:
node-version: latest
# - name: Setup Git LFS
# run: git lfs install
# # Cannot do this, no space on the GitHub standard runner. We need to use the large runner which is self-hosted
# - name: Download the Model from Hugging Face
# run: mkdir -p ./LLM/Models && cd ./LLM/Models && git clone https://${{ secrets.HUGGING_FACE_USERNAME }}:${{ secrets.HUGGING_FACE_PASSWORD }}@huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
with:
image: tonistiigi/binfmt:qemu-v10.0.4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Generate Dockerfile from Dockerfile.tpl
uses: nick-fields/retry@v3
with:
timeout_minutes: 10
max_attempts: 3
command: npm run prerun
# Build and deploy nginx.
- name: Login to Docker Hub
uses: nick-fields/retry@v3
with:
timeout_minutes: 5
max_attempts: 3
command: |
echo "${{ secrets.DOCKERHUB_PASSWORD }}" | docker login --username "${{ secrets.DOCKERHUB_USERNAME }}" --password-stdin
- name: Login to GitHub Container Registry
uses: nick-fields/retry@v3
with:
timeout_minutes: 5
max_attempts: 3
command: |
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io --username "${{ github.repository_owner }}" --password-stdin
- name: Build and push
uses: nick-fields/retry@v3
with:
timeout_minutes: 30
max_attempts: 3
command: |
bash ./Scripts/GHA/build_docker_images.sh \
--image llm \
--version "${{needs.read-version.outputs.major_minor}}-test" \
--dockerfile ./LLM/Dockerfile \
--context ./LLM \
--platforms linux/amd64 \
--git-sha "${{ github.sha }}" \
--extra-tags test \
--extra-enterprise-tags enterprise-test
nginx-docker-image-deploy:
needs: [read-version, generate-build-number]
runs-on: ubuntu-latest
@@ -1965,7 +1868,7 @@ jobs:
test-helm-chart:
runs-on: ubuntu-latest
needs: [infrastructure-agent-deploy, publish-mcp-server, llm-docker-image-deploy, publish-terraform-provider, telemetry-docker-image-deploy, copilot-docker-image-deploy, docs-docker-image-deploy, worker-docker-image-deploy, workflow-docker-image-deploy, isolated-vm-docker-image-deploy, home-docker-image-deploy, api-reference-docker-image-deploy, test-server-docker-image-deploy, test-docker-image-deploy, probe-ingest-docker-image-deploy, server-monitor-ingest-docker-image-deploy, probe-docker-image-deploy, dashboard-docker-image-deploy, admin-dashboard-docker-image-deploy, app-docker-image-deploy, accounts-docker-image-deploy, otel-collector-docker-image-deploy, status-page-docker-image-deploy, nginx-docker-image-deploy, e2e-docker-image-deploy, incoming-request-ingest-docker-image-deploy]
needs: [infrastructure-agent-deploy, publish-mcp-server, publish-terraform-provider, telemetry-docker-image-deploy, copilot-docker-image-deploy, docs-docker-image-deploy, worker-docker-image-deploy, workflow-docker-image-deploy, isolated-vm-docker-image-deploy, home-docker-image-deploy, api-reference-docker-image-deploy, test-server-docker-image-deploy, test-docker-image-deploy, probe-ingest-docker-image-deploy, server-monitor-ingest-docker-image-deploy, probe-docker-image-deploy, dashboard-docker-image-deploy, admin-dashboard-docker-image-deploy, app-docker-image-deploy, accounts-docker-image-deploy, otel-collector-docker-image-deploy, status-page-docker-image-deploy, nginx-docker-image-deploy, e2e-docker-image-deploy, incoming-request-ingest-docker-image-deploy]
env:
CI_PIPELINE_ID: ${{github.run_number}}
steps:

View File

@@ -1 +0,0 @@
Models/*

View File

@@ -1,20 +0,0 @@
# Use an official Python runtime as a parent image
FROM huggingface/transformers-pytorch-gpu:latest
ARG IS_ENTERPRISE_EDITION=false
ENV IS_ENTERPRISE_EDITION=${IS_ENTERPRISE_EDITION}
# Set the working directory in the container to /app
WORKDIR /app
# Copy the current directory contents into the container at /app
ADD . /app
# Install any needed packages specified in requirements.txt
RUN pip install --no-cache-dir -r requirements.txt
# Make port 8547 available to the world outside this container
EXPOSE 8547
# Run app.py when the container launches
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8547" ]

View File

@@ -1,81 +0,0 @@
# LLM
### Development Guide
#### Step 1: Downloading Model from Hugging Face
Please make sure you have git lfs installed before cloning the model.
```bash
git lfs install
```
```bash
cd ./LLM/Models
# Here we are downloading the Meta-Llama-3-8B-Instruct model
git clone https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct
```
You will be asked for a username and password.
Please use your Hugging Face username as the username and
your Hugging Face API token as the password.
#### Step 2: Install Docker.
Install Docker and Docker Compose
```bash
sudo apt-get update
sudo curl -sSL https://get.docker.com/ | sh
```
Install Rootless Docker
```bash
sudo apt-get install -y uidmap
dockerd-rootless-setuptool.sh install
```
See if the installation works
```bash
docker --version
docker ps
# You should see no containers running, but you should not see any errors.
```
#### Step 3: Install NVIDIA drivers on the machine to use the GPU
- Install Container Toolkit: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html#installing-the-nvidia-container-toolkit
- Install CUDA: https://developer.nvidia.com/cuda-downloads?target_os=Linux&target_arch=x86_64&Distribution=Ubuntu&target_version=22.04&target_type=deb_network
- Restart the machine
- You should now see GPU when you run `nvidia-smi`
#### Step 4: Run the test workload to see if GPU is connected to Docker.
```bash
docker run --rm -it --gpus=all nvcr.io/nvidia/k8s/cuda-sample:nbody nbody -gpu -benchmark
```
You have configured the machine to use GPU with Docker.
### Build
- Download models from meta
- Once the model is downloaded, place them in the `Llama/Models` folder. Please make sure you also place tokenizer.model and tokenizer_checklist.chk in the same folder.
- Edit `Dockerfile` to include the model name in the `MODEL_NAME` variable.
- Docker build
```
npm run build-ai
```
### Run
```
npm run start-ai
```
After you start, run `nvidia-smi` to see if the GPU is being used. You should see the python process running on the GPU.

View File

@@ -1,227 +0,0 @@
import uuid
import transformers
import asyncio
import os
import torch
import aiohttp
from fastapi import FastAPI
from pydantic import BaseModel
from contextlib import asynccontextmanager
from apscheduler.schedulers.background import BackgroundScheduler
# ENV VARS
ONEUPTIME_URL = os.getenv("ONEUPTIME_URL")
HF_MODEL_NAME = os.getenv("HF_MODEL_NAME")
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_MODEL_NAME:
HF_MODEL_NAME = "meta-llama/Meta-Llama-3-8B-Instruct"
print(f"HF_MODEL_NAME not set. Using default model: {HF_MODEL_NAME}")
if not ONEUPTIME_URL:
ONEUPTIME_URL = "https://oneuptime.com"
if not HF_TOKEN:
# Print error and exit
print("HF_TOKEN env var is required. This is the Hugging Face API token. You can get it from https://huggingface.co/account/overview. Exiting..")
exit()
# TODO: Store this in redis down the line.
items_pending = {}
items_processed = {}
errors = {}
async def validateSecretKey(secretKey):
try:
# If no secret key then return false
if not secretKey:
return False
async with aiohttp.ClientSession() as session:
print(f"Validating secret key")
url = f"{ONEUPTIME_URL}/api/copilot-code-repository/is-valid/{secretKey}"
async with session.get(url) as response:
print(response)
if response.status == 200:
return True
else:
return False
except Exception as e:
print(repr(e))
return False
async def job(queue):
print("Downlaoding model from Hugging Face: "+HF_MODEL_NAME)
# check if the model is meta-llama/Meta-Llama-3-8B-Instruct
if HF_MODEL_NAME == "meta-llama/Meta-Llama-3-8B-Instruct":
print("If you want to use a different model, please set the HF_MODEL_NAME environment variable.")
print("This may take a while (minutes or sometimes hours) depending on the model size.")
# model_path = "/app/Models/Meta-Llama-3-8B-Instruct"
model_path = HF_MODEL_NAME
pipe = transformers.pipeline(
"text-generation",
model=model_path,
# use gpu if available
device="cuda" if torch.cuda.is_available() else "cpu",
# max_new_tokens=8096
)
print("Model downloaded.")
while True:
random_id = None
try:
# process this item.
random_id = await queue.get()
print(f"Processing item {random_id}")
messages = items_pending[random_id]
print(f"Messages:")
print(messages)
outputs = pipe(messages)
items_processed[random_id] = outputs
del items_pending[random_id]
print(f"Processed item {random_id}")
except Exception as e:
print(f"Error processing item {random_id}")
# store error
errors[random_id] = repr(e)
# delete from items_pending
if random_id in items_pending:
del items_pending[random_id]
print(e)
@asynccontextmanager
async def lifespan(app:FastAPI):
queue = asyncio.Queue()
app.model_queue = queue
asyncio.create_task(job(queue))
yield
# Declare a Pydantic model for the request body
class Prompt(BaseModel):
messages: list
# secretkey: str
# Declare a Pydantic model for the request body
class PromptResult(BaseModel):
id: str
# secretkey: str
app = FastAPI(lifespan=lifespan)
@app.get("/")
async def root():
return {"status": "ok"}
@app.get("/status")
async def status():
return {"status": "ok"}
@app.post("/prompt/")
async def create_item(prompt: Prompt):
try:
# If not prompt then return bad request error
if not prompt:
return {"error": "Prompt is required"}
# Validate the secret key
# is_valid = await validateSecretKey(prompt.secretkey)
# if not is_valid:
# print("Invalid secret key")
# return {"error": "Invalid secret key"}
# messages are in str format. We need to convert them fron json [] to list
messages = prompt.messages
# Log prompt to console
print(messages)
# Generate UUID
random_id = str(uuid.uuid4())
# add to queue
items_pending[random_id] = messages
await app.model_queue.put(random_id)
# Return response
return {
"id": random_id,
"status": "queued"
}
except Exception as e:
print(e)
return {"error": repr(e)}
# Disable this API in production
@app.get("/queue-status/")
async def queue_status():
try:
return {"pending": items_pending, "processed": items_processed, "queue": app.model_queue.qsize(), "errors": errors}
except Exception as e:
print(e)
return {"error": repr(e)}
@app.post("/prompt-result/")
async def prompt_status(prompt_status: PromptResult):
try:
# Log prompt status to console
print(prompt_status)
# Validate the secret key
# is_valid = await validateSecretKey(prompt_status.secretkey)
# if not is_valid:
# print("Invalid secret key")
# return {"error": "Invalid secret key"}
# If not prompt status then return bad request error
if not prompt_status:
return {"error": "Prompt status is required"}
# check if item is processed.
if prompt_status.id in items_processed:
return_value = {
"id": prompt_status.id,
"status": "processed",
"output": items_processed[prompt_status.id]
}
# delete from item_processed
del items_processed[prompt_status.id]
return return_value
else:
status = "not found"
if prompt_status.id in items_pending:
status = "pending"
return {
"id": prompt_status.id,
"status": status
}
except Exception as e:
print(e)
return {"error": repr(e)}

View File

@@ -1,11 +0,0 @@
# Hugging Face Transformers
transformers==4.41.2
# Rest api related stuff.
fastapi===0.109.1
uvicorn===0.23.2
pydantic===2.4.2
# Rest of the app
APScheduler===3.10.4
aiohttp===3.9.5

View File

@@ -1,112 +0,0 @@
{
"ts-node": {
// these options are overrides used only by ts-node
// same as the --compilerOptions flag and the TS_NODE_COMPILER_OPTIONS environment variable
"compilerOptions": {
"module": "commonjs",
"resolveJsonModule": true,
}
},
"compilerOptions": {
/* Visit https://aka.ms/tsconfig.json to read more about this file */
/* Projects */
// "incremental": true, /* Enable incremental compilation */
// "composite": true, /* Enable constraints that allow a TypeScript project to be used with project references. */
// "tsBuildInfoFile": "./", /* Specify the folder for .tsbuildinfo incremental compilation files. */
// "disableSourceOfProjectReferenceRedirect": true, /* Disable preferring source files instead of declaration files when referencing composite projects */
// "disableSolutionSearching": true, /* Opt a project out of multi-project reference checking when editing. */
// "disableReferencedProjectLoad": true, /* Reduce the number of projects loaded automatically by TypeScript. */
"module": "ES2020",
/* Language and Environment */
"target": "es2020" /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */,
// "lib": [], /* Specify a set of bundled library declaration files that describe the target runtime environment. */
"jsx": "react" /* Specify what JSX code is generated. */,
"experimentalDecorators": true, /* Enable experimental support for TC39 stage 2 draft decorators. */
"emitDecoratorMetadata": true, /* Emit design-type metadata for decorated declarations in source files. */
// "jsxFactory": "", /* Specify the JSX factory function used when targeting React JSX emit, e.g. 'React.createElement' or 'h' */
// "jsxFragmentFactory": "", /* Specify the JSX Fragment reference used for fragments when targeting React JSX emit e.g. 'React.Fragment' or 'Fragment'. */
// "jsxImportSource": "", /* Specify module specifier used to import the JSX factory functions when using `jsx: react-jsx*`.` */
// "reactNamespace": "", /* Specify the object invoked for `createElement`. This only applies when targeting `react` JSX emit. */
// "noLib": true, /* Disable including any library files, including the default lib.d.ts. */
// "useDefineForClassFields": true, /* Emit ECMAScript-standard-compliant class fields. */
/* Modules */
// "module": "es2022" /* Specify what module code is generated. */,
// "rootDir": "./", /* Specify the root folder within your source files. */
"moduleResolution": "node", /* Specify how TypeScript looks up a file from a given module specifier. */
// "baseUrl": "./", /* Specify the base directory to resolve non-relative module names. */
// "paths": {}, /* Specify a set of entries that re-map imports to additional lookup locations. */
// "rootDirs": [], /* Allow multiple folders to be treated as one when resolving modules. */
"typeRoots": [
"./node_modules/@types"
], /* Specify multiple folders that act like `./node_modules/@types`. */
"types": ["node"], /* Specify type package names to be included without being referenced in a source file. */
// "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. */
// "resolveJsonModule": true, /* Enable importing .json files */
// "noResolve": true, /* Disallow `import`s, `require`s or `<reference>`s from expanding the number of files TypeScript should add to a project. */
/* JavaScript Support */
// "allowJs": true, /* Allow JavaScript files to be a part of your program. Use the `checkJS` option to get errors from these files. */
// "checkJs": true, /* Enable error reporting in type-checked JavaScript files. */
// "maxNodeModuleJsDepth": 1, /* Specify the maximum folder depth used for checking JavaScript files from `node_modules`. Only applicable with `allowJs`. */
/* Emit */
// "declaration": true, /* Generate .d.ts files from TypeScript and JavaScript files in your project. */
// "declarationMap": true, /* Create sourcemaps for d.ts files. */
// "emitDeclarationOnly": true, /* Only output d.ts files and not JavaScript files. */
"sourceMap": true, /* Create source map files for emitted JavaScript files. */
// "outFile": "./", /* Specify a file that bundles all outputs into one JavaScript file. If `declaration` is true, also designates a file that bundles all .d.ts output. */
"outDir": "./build/dist", /* Specify an output folder for all emitted files. */
// "removeComments": true, /* Disable emitting comments. */
// "noEmit": true, /* Disable emitting files from a compilation. */
// "importHelpers": true, /* Allow importing helper functions from tslib once per project, instead of including them per-file. */
// "importsNotUsedAsValues": "remove", /* Specify emit/checking behavior for imports that are only used for types */
// "downlevelIteration": true, /* Emit more compliant, but verbose and less performant JavaScript for iteration. */
// "sourceRoot": "", /* Specify the root path for debuggers to find the reference source code. */
// "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */
// "inlineSourceMap": true, /* Include sourcemap files inside the emitted JavaScript. */
// "inlineSources": true, /* Include source code in the sourcemaps inside the emitted JavaScript. */
// "emitBOM": true, /* Emit a UTF-8 Byte Order Mark (BOM) in the beginning of output files. */
// "newLine": "crlf", /* Set the newline character for emitting files. */
// "stripInternal": true, /* Disable emitting declarations that have `@internal` in their JSDoc comments. */
// "noEmitHelpers": true, /* Disable generating custom helper functions like `__extends` in compiled output. */
// "noEmitOnError": true, /* Disable emitting files if any type checking errors are reported. */
// "preserveConstEnums": true, /* Disable erasing `const enum` declarations in generated code. */
// "declarationDir": "./", /* Specify the output directory for generated declaration files. */
// "preserveValueImports": true, /* Preserve unused imported values in the JavaScript output that would otherwise be removed. */
/* Interop Constraints */
// "isolatedModules": true, /* Ensure that each file can be safely transpiled without relying on other imports. */
// "allowSyntheticDefaultImports": true, /* Allow 'import x from y' when a module doesn't have a default export. */
"esModuleInterop": true /* Emit additional JavaScript to ease support for importing CommonJS modules. This enables `allowSyntheticDefaultImports` for type compatibility. */,
// "preserveSymlinks": true, /* Disable resolving symlinks to their realpath. This correlates to the same flag in node. */
"forceConsistentCasingInFileNames": true /* Ensure that casing is correct in imports. */,
/* Type Checking */
"strict": true /* Enable all strict type-checking options. */,
"noImplicitAny": true, /* Enable error reporting for expressions and declarations with an implied `any` type.. */
"strictNullChecks": true, /* When type checking, take into account `null` and `undefined`. */
"strictFunctionTypes": true, /* When assigning functions, check to ensure parameters and the return values are subtype-compatible. */
"strictBindCallApply": true, /* Check that the arguments for `bind`, `call`, and `apply` methods match the original function. */
"strictPropertyInitialization": true, /* Check for class properties that are declared but not set in the constructor. */
"noImplicitThis": true, /* Enable error reporting when `this` is given the type `any`. */
"useUnknownInCatchVariables": true, /* Type catch clause variables as 'unknown' instead of 'any'. */
"alwaysStrict": true, /* Ensure 'use strict' is always emitted. */
"noUnusedLocals": true, /* Enable error reporting when a local variables aren't read. */
"noUnusedParameters": true, /* Raise an error when a function parameter isn't read */
"exactOptionalPropertyTypes": true, /* Interpret optional property types as written, rather than adding 'undefined'. */
"noImplicitReturns": true, /* Enable error reporting for codepaths that do not explicitly return in a function. */
"noFallthroughCasesInSwitch": true, /* Enable error reporting for fallthrough cases in switch statements. */
"noUncheckedIndexedAccess": true, /* Include 'undefined' in index signature results */
"noImplicitOverride": true, /* Ensure overriding members in derived classes are marked with an override modifier. */
"noPropertyAccessFromIndexSignature": true, /* Enforces using indexed accessors for keys declared using an indexed type */
// "allowUnusedLabels": true, /* Disable error reporting for unused labels. */
// "allowUnreachableCode": true, /* Disable error reporting for unreachable code. */
/* Completeness */
// "skipDefaultLibCheck": true, /* Skip type checking .d.ts files that are included with TypeScript. */
"skipLibCheck": true, /* Skip type checking all .d.ts files. */
"resolveJsonModule": true
}
}