From 9a47b02a0c3facf3994caf3e0e1f71c574e109b2 Mon Sep 17 00:00:00 2001 From: Nawaz Dhandala Date: Mon, 16 Mar 2026 10:26:03 +0000 Subject: [PATCH] feat: upgrade time column to DateTime64 for nanosecond precision in logs --- Common/Models/AnalyticsModels/Log.ts | 2 +- .../Utils/AnalyticsDatabase/Statement.ts | 16 ++++++++- .../AnalyticsDatabase/StatementGenerator.ts | 2 ++ .../AnalyticsDatabase/TableColumnType.ts | 1 + Common/Types/Date.ts | 24 +++++++++++++ Internal/Roadmap/Logs.md | 35 +++---------------- Telemetry/Services/FluentLogsIngestService.ts | 2 +- Telemetry/Services/OtelLogsIngestService.ts | 5 ++- Telemetry/Services/SyslogIngestService.ts | 2 +- .../ChangeLogTimeColumnToDateTime64.ts | 22 ++++++++++++ Worker/DataMigrations/Index.ts | 2 ++ 11 files changed, 78 insertions(+), 35 deletions(-) create mode 100644 Worker/DataMigrations/ChangeLogTimeColumnToDateTime64.ts diff --git a/Common/Models/AnalyticsModels/Log.ts b/Common/Models/AnalyticsModels/Log.ts index ffff6a3967..7f345c451b 100644 --- a/Common/Models/AnalyticsModels/Log.ts +++ b/Common/Models/AnalyticsModels/Log.ts @@ -64,7 +64,7 @@ export default class Log extends AnalyticsBaseModel { title: "Time", description: "When was the log created?", required: true, - type: TableColumnType.Date, + type: TableColumnType.DateTime64, accessControl: { read: [ Permission.ProjectOwner, diff --git a/Common/Server/Utils/AnalyticsDatabase/Statement.ts b/Common/Server/Utils/AnalyticsDatabase/Statement.ts index ff69822dab..429f920721 100644 --- a/Common/Server/Utils/AnalyticsDatabase/Statement.ts +++ b/Common/Server/Utils/AnalyticsDatabase/Statement.ts @@ -120,7 +120,11 @@ export class Statement implements BaseQueryParams { finalValue = v.value.values; } } else if (v.value instanceof Date) { - finalValue = OneUptimeDate.toClickhouseDateTime(v.value); + if (typeof v !== "string" && v.type === TableColumnType.DateTime64) { + finalValue = OneUptimeDate.toClickhouseDateTime64(v.value); + } else { + finalValue = 
OneUptimeDate.toClickhouseDateTime(v.value); + } } else { finalValue = v.value; } @@ -136,6 +140,15 @@ export class Statement implements BaseQueryParams { finalValue = OneUptimeDate.toClickhouseDateTime(finalValue); } + if ( + typeof v !== "string" && + v.type === TableColumnType.DateTime64 && + !(v.value instanceof Date) + ) { + finalValue = OneUptimeDate.fromString(finalValue as string); + finalValue = OneUptimeDate.toClickhouseDateTime64(finalValue); + } + return finalValue; } @@ -176,6 +189,7 @@ export class Statement implements BaseQueryParams { [TableColumnType.Number]: "Int32", [TableColumnType.Decimal]: "Double", [TableColumnType.Date]: "DateTime", + [TableColumnType.DateTime64]: "DateTime64(9)", [TableColumnType.JSON]: "JSON", [TableColumnType.ArrayNumber]: "Array(Int32)", [TableColumnType.ArrayText]: "Array(String)", diff --git a/Common/Server/Utils/AnalyticsDatabase/StatementGenerator.ts b/Common/Server/Utils/AnalyticsDatabase/StatementGenerator.ts index cc689fed60..8bac3b3ef1 100644 --- a/Common/Server/Utils/AnalyticsDatabase/StatementGenerator.ts +++ b/Common/Server/Utils/AnalyticsDatabase/StatementGenerator.ts @@ -654,6 +654,7 @@ export default class StatementGenerator { Float32: TableColumnType.Decimal, Float64: TableColumnType.Decimal, DateTime: TableColumnType.Date, + "DateTime64(9)": TableColumnType.DateTime64, "Array(String)": TableColumnType.ArrayText, "Array(Int32)": TableColumnType.ArrayNumber, JSON: TableColumnType.JSON, //JSONArray is also JSON @@ -671,6 +672,7 @@ export default class StatementGenerator { [TableColumnType.IP]: SQL`String`, [TableColumnType.Port]: SQL`String`, [TableColumnType.Date]: SQL`DateTime`, + [TableColumnType.DateTime64]: SQL`DateTime64(9)`, [TableColumnType.JSON]: SQL`String`, // we use JSON as a string because ClickHouse has really good JSON support for string types [TableColumnType.JSONArray]: SQL`String`, // we use JSON as a string because ClickHouse has really good JSON support for string types 
[TableColumnType.ArrayNumber]: SQL`Array(Int32)`, diff --git a/Common/Types/AnalyticsDatabase/TableColumnType.ts b/Common/Types/AnalyticsDatabase/TableColumnType.ts index 7266a733cd..7120408bf4 100644 --- a/Common/Types/AnalyticsDatabase/TableColumnType.ts +++ b/Common/Types/AnalyticsDatabase/TableColumnType.ts @@ -10,6 +10,7 @@ enum ColumnType { ArrayNumber = "Array of Numbers", ArrayText = "Array of Text", LongNumber = "Long Number", + DateTime64 = "DateTime64", IP = "IP", Port = "Port", } diff --git a/Common/Types/Date.ts b/Common/Types/Date.ts index ff007e04e3..f2d00e3e7a 100644 --- a/Common/Types/Date.ts +++ b/Common/Types/Date.ts @@ -1524,4 +1524,28 @@ export default class OneUptimeDate { const parsedDate: Date = this.fromString(date); return moment(parsedDate).utc().format("YYYY-MM-DD HH:mm:ss"); } + + public static toClickhouseDateTime64( + date: Date | string, + nanoTimestamp?: number, + ): string { + const parsedDate: Date = this.fromString(date); + const base: string = moment(parsedDate) + .utc() + .format("YYYY-MM-DD HH:mm:ss"); + + let nanoFraction: string; + + if (nanoTimestamp !== undefined && nanoTimestamp > 0) { + // Extract sub-second nanoseconds from the unix nano timestamp + const subSecondNanos: number = nanoTimestamp % 1_000_000_000; + nanoFraction = subSecondNanos.toString().padStart(9, "0"); + } else { + // Fall back to milliseconds from the Date object + const ms: number = parsedDate.getMilliseconds(); + nanoFraction = (ms * 1_000_000).toString().padStart(9, "0"); + } + + return `${base}.${nanoFraction}`; + } } diff --git a/Internal/Roadmap/Logs.md b/Internal/Roadmap/Logs.md index b41c45adea..b97fd71dff 100644 --- a/Internal/Roadmap/Logs.md +++ b/Internal/Roadmap/Logs.md @@ -26,6 +26,7 @@ The following features have been implemented and removed from this plan: - **Phase 3.4** - Export to CSV/JSON (Export button in toolbar, LogExport utility with CSV and JSON support) - **Phase 4.2** - Keyboard Shortcuts (j/k navigation, Enter 
expand/collapse, Esc close, / focus search, Ctrl+Enter apply filters, ? help) - **Phase 4.3** - Sensitive Data Scrubbing (LogScrubRule model with PII patterns: Email, CreditCard, SSN, PhoneNumber, IPAddress, custom regex) +- **Phase 5.3** - DateTime64 time column upgrade (DateTime64(9) nanosecond precision, toClickhouseDateTime64 utility, data migration, all ingestion services updated) ## Gap Analysis Summary @@ -60,30 +61,6 @@ The following features have been implemented and removed from this plan: These optimizations address fundamental storage and indexing gaps in the telemetry tables that directly impact search speed, data correctness, and operational cost. -### 5.3 Upgrade `time` Column to `DateTime64(9)` (High) - -**Current**: The `time` column uses ClickHouse `DateTime` which has **1-second granularity**. Logs within the same second from the same service are stored in arbitrary order. The `timeUnixNano` field (Int128) preserves nanosecond precision but is not in the sort key, so it cannot be used for sub-second ordering. - -**Target**: Use `DateTime64(9)` (nanosecond precision) so the sort key naturally orders logs at sub-second resolution. - -**Implementation**: - -- Change the `time` column type from `TableColumnType.Date` to a new `TableColumnType.DateTime64` in the Log model -- Add `DateTime64` support to `StatementGenerator` and the ClickHouse type mapping in `Statement.ts` -- Update ingestion code in `OtelLogsIngestService.ts` to write DateTime64-compatible timestamps -- Migration: `ALTER TABLE LogItem MODIFY COLUMN time DateTime64(9)` (this is a metadata-only operation in ClickHouse for MergeTree tables) -- Consider whether `timeUnixNano` column can be deprecated after this change since `time` would carry the same precision - -**Impact**: Correct sub-second log ordering. Currently, logs from a burst of activity within the same second may appear in wrong order. 
- -**Files to modify**: -- `Common/Models/AnalyticsModels/Log.ts` (change column type) -- `Common/Types/AnalyticsDatabase/TableColumnType.ts` (add DateTime64 type) -- `Common/Server/Utils/AnalyticsDatabase/Statement.ts` (add DateTime64 mapping) -- `Common/Server/Utils/AnalyticsDatabase/StatementGenerator.ts` (handle DateTime64 in CREATE/SELECT) -- `Telemetry/Services/OtelLogsIngestService.ts` (write DateTime64 timestamps) -- `Worker/DataMigrations/` (new migration) - ### 5.7 Add Projections for Histogram Queries (Medium) **Current**: `projections: []` is empty. Every histogram query (group by time bucket + severity) and facet query scans raw data and performs the aggregation from scratch. @@ -118,18 +95,16 @@ These optimizations address fundamental storage and indexing gaps in the telemet | Optimization | Query Pattern Improved | Expected Speedup | Effort | |-------------|----------------------|-------------------|--------| -| 5.3 DateTime64 time column | Sub-second log ordering | Correctness fix | Medium | | 5.7 Histogram projections | Histogram and severity aggregation | 5-10x | Medium | --- ## Recommended Remaining Implementation Order -1. **5.3** — DateTime64 upgrade (correctness) -2. **5.7** — Projections (performance polish) -3. **Log-based Metrics** (platform capability) -4. **Data Retention Config UI** (operational) -5. **Log Patterns / ML Clustering** (advanced, larger effort) +1. **5.7** — Projections (performance polish) +2. **Log-based Metrics** (platform capability) +3. **Data Retention Config UI** (operational) +4. 
**Log Patterns / ML Clustering** (advanced, larger effort) --- diff --git a/Telemetry/Services/FluentLogsIngestService.ts b/Telemetry/Services/FluentLogsIngestService.ts index 262d9cfcdd..490d97a805 100644 --- a/Telemetry/Services/FluentLogsIngestService.ts +++ b/Telemetry/Services/FluentLogsIngestService.ts @@ -230,7 +230,7 @@ export default class FluentLogsIngestService extends OtelIngestBaseService { updatedAt: ingestionDateTime, projectId: projectId.toString(), serviceId: serviceMetadata.serviceId.toString(), - time: ingestionDateTime, + time: OneUptimeDate.toClickhouseDateTime64(ingestionDate), timeUnixNano, severityNumber: severityInfo.number, severityText: severityInfo.text, diff --git a/Telemetry/Services/OtelLogsIngestService.ts b/Telemetry/Services/OtelLogsIngestService.ts index ab5831d982..fc2362826c 100644 --- a/Telemetry/Services/OtelLogsIngestService.ts +++ b/Telemetry/Services/OtelLogsIngestService.ts @@ -330,7 +330,10 @@ export default class OtelLogsIngestService extends OtelIngestBaseService { const ingestionTimestamp: string = OneUptimeDate.toClickhouseDateTime(ingestionDate); const logTimestamp: string = - OneUptimeDate.toClickhouseDateTime(timeDate); + OneUptimeDate.toClickhouseDateTime64( + timeDate, + timeUnixNanoNumeric, + ); const retentionDate: Date = OneUptimeDate.addRemoveDays( ingestionDate, diff --git a/Telemetry/Services/SyslogIngestService.ts b/Telemetry/Services/SyslogIngestService.ts index e860d3640a..1b5ca5bea8 100644 --- a/Telemetry/Services/SyslogIngestService.ts +++ b/Telemetry/Services/SyslogIngestService.ts @@ -205,7 +205,7 @@ export default class SyslogIngestService extends OtelIngestBaseService { updatedAt: OneUptimeDate.toClickhouseDateTime(ingestionDate), projectId: projectId.toString(), serviceId: serviceMetadata.serviceId.toString(), - time: OneUptimeDate.toClickhouseDateTime(timestamp), + time: OneUptimeDate.toClickhouseDateTime64(timestamp), timeUnixNano: Math.trunc( OneUptimeDate.toUnixNano(timestamp), ).toString(), 
diff --git a/Worker/DataMigrations/ChangeLogTimeColumnToDateTime64.ts b/Worker/DataMigrations/ChangeLogTimeColumnToDateTime64.ts
new file mode 100644
index 0000000000..33257ceded
--- /dev/null
+++ b/Worker/DataMigrations/ChangeLogTimeColumnToDateTime64.ts
@@ -0,0 +1,22 @@
+import DataMigrationBase from "./DataMigrationBase";
+import LogService from "Common/Server/Services/LogService";
+
+export default class ChangeLogTimeColumnToDateTime64 extends DataMigrationBase {
+  public constructor() {
+    super("ChangeLogTimeColumnToDateTime64");
+  }
+
+  public override async migrate(): Promise<void> {
+    // ALTER TABLE ... MODIFY COLUMN is a metadata-only operation in ClickHouse MergeTree tables.
+    // It changes the column type without rewriting data on disk.
+    await LogService.execute(
+      `ALTER TABLE ${LogService.model.tableName} MODIFY COLUMN time DateTime64(9)`,
+    );
+  }
+
+  public override async rollback(): Promise<void> {
+    await LogService.execute(
+      `ALTER TABLE ${LogService.model.tableName} MODIFY COLUMN time DateTime`,
+    );
+  }
+}
diff --git a/Worker/DataMigrations/Index.ts b/Worker/DataMigrations/Index.ts
index b259e7d9c3..4f401d3cc3 100644
--- a/Worker/DataMigrations/Index.ts
+++ b/Worker/DataMigrations/Index.ts
@@ -60,6 +60,7 @@ import AddColumnsToExceptionInstance from "./AddColumnsToExceptionInstance";
 import AddRetentionDateAndSkipIndexesToTelemetryTables from "./AddRetentionDateAndSkipIndexesToTelemetryTables";
 import AddOtelFieldsToLogTable from "./AddOtelFieldsToLogTable";
 import AddSpanTableOptimizations from "./AddSpanTableOptimizations";
+import ChangeLogTimeColumnToDateTime64 from "./ChangeLogTimeColumnToDateTime64";
 
 // This is the order in which the migrations will be run. Add new migrations to the end of the array.
 
@@ -124,6 +125,7 @@ const DataMigrations: Array<DataMigrationBase> = [
   new AddRetentionDateAndSkipIndexesToTelemetryTables(),
   new AddOtelFieldsToLogTable(),
   new AddSpanTableOptimizations(),
+  new ChangeLogTimeColumnToDateTime64(),
 ];
 
 export default DataMigrations;