feat: upgrade time column to DateTime64 for nanosecond precision in logs

This commit is contained in:
Nawaz Dhandala
2026-03-16 10:26:03 +00:00
parent 3ccd089d4f
commit 9a47b02a0c
11 changed files with 78 additions and 35 deletions

View File

@@ -64,7 +64,7 @@ export default class Log extends AnalyticsBaseModel {
title: "Time",
description: "When was the log created?",
required: true,
type: TableColumnType.Date,
type: TableColumnType.DateTime64,
accessControl: {
read: [
Permission.ProjectOwner,

View File

@@ -120,7 +120,11 @@ export class Statement implements BaseQueryParams {
finalValue = v.value.values;
}
} else if (v.value instanceof Date) {
finalValue = OneUptimeDate.toClickhouseDateTime(v.value);
if (typeof v !== "string" && v.type === TableColumnType.DateTime64) {
finalValue = OneUptimeDate.toClickhouseDateTime64(v.value);
} else {
finalValue = OneUptimeDate.toClickhouseDateTime(v.value);
}
} else {
finalValue = v.value;
}
@@ -136,6 +140,15 @@ export class Statement implements BaseQueryParams {
finalValue = OneUptimeDate.toClickhouseDateTime(finalValue);
}
if (
typeof v !== "string" &&
v.type === TableColumnType.DateTime64 &&
!(v.value instanceof Date)
) {
finalValue = OneUptimeDate.fromString(finalValue as string);
finalValue = OneUptimeDate.toClickhouseDateTime64(finalValue);
}
return finalValue;
}
@@ -176,6 +189,7 @@ export class Statement implements BaseQueryParams {
[TableColumnType.Number]: "Int32",
[TableColumnType.Decimal]: "Double",
[TableColumnType.Date]: "DateTime",
[TableColumnType.DateTime64]: "DateTime64(9)",
[TableColumnType.JSON]: "JSON",
[TableColumnType.ArrayNumber]: "Array(Int32)",
[TableColumnType.ArrayText]: "Array(String)",

View File

@@ -654,6 +654,7 @@ export default class StatementGenerator<TBaseModel extends AnalyticsBaseModel> {
Float32: TableColumnType.Decimal,
Float64: TableColumnType.Decimal,
DateTime: TableColumnType.Date,
"DateTime64(9)": TableColumnType.DateTime64,
"Array(String)": TableColumnType.ArrayText,
"Array(Int32)": TableColumnType.ArrayNumber,
JSON: TableColumnType.JSON, //JSONArray is also JSON
@@ -671,6 +672,7 @@ export default class StatementGenerator<TBaseModel extends AnalyticsBaseModel> {
[TableColumnType.IP]: SQL`String`,
[TableColumnType.Port]: SQL`String`,
[TableColumnType.Date]: SQL`DateTime`,
[TableColumnType.DateTime64]: SQL`DateTime64(9)`,
[TableColumnType.JSON]: SQL`String`, // we use JSON as a string because ClickHouse has really good JSON support for string types
[TableColumnType.JSONArray]: SQL`String`, // we use JSON as a string because ClickHouse has really good JSON support for string types
[TableColumnType.ArrayNumber]: SQL`Array(Int32)`,

View File

@@ -10,6 +10,7 @@ enum ColumnType {
ArrayNumber = "Array of Numbers",
ArrayText = "Array of Text",
LongNumber = "Long Number",
DateTime64 = "DateTime64",
IP = "IP",
Port = "Port",
}

View File

@@ -1524,4 +1524,28 @@ export default class OneUptimeDate {
const parsedDate: Date = this.fromString(date);
return moment(parsedDate).utc().format("YYYY-MM-DD HH:mm:ss");
}
/**
 * Formats a date as a ClickHouse DateTime64(9) literal:
 * "YYYY-MM-DD HH:mm:ss.nnnnnnnnn", rendered in UTC.
 *
 * @param date - The date (or parseable date string) supplying the
 *   second-resolution part of the timestamp.
 * @param nanoTimestamp - Optional unix timestamp in nanoseconds used to
 *   recover sub-second precision. May be a bigint (exact) or a number.
 *   NOTE: unix nano timestamps (~1.7e18) exceed Number.MAX_SAFE_INTEGER
 *   (~9e15), so a `number` cannot carry exact nanoseconds — callers that
 *   have the exact value should pass a bigint.
 * @returns The DateTime64(9)-compatible string.
 */
public static toClickhouseDateTime64(
  date: Date | string,
  nanoTimestamp?: number | bigint,
): string {
  const parsedDate: Date = this.fromString(date);
  const base: string = moment(parsedDate)
    .utc()
    .format("YYYY-MM-DD HH:mm:ss");
  let nanoFraction: string;
  if (nanoTimestamp !== undefined && nanoTimestamp > 0) {
    // Extract sub-second nanoseconds from the unix nano timestamp.
    // Route through BigInt: `number % 1_000_000_000` is lossy above
    // Number.MAX_SAFE_INTEGER, and Math.trunc guards against BigInt()
    // throwing on a fractional number.
    const nanos: bigint =
      typeof nanoTimestamp === "bigint"
        ? nanoTimestamp
        : BigInt(Math.trunc(nanoTimestamp));
    const subSecondNanos: bigint = nanos % 1_000_000_000n;
    nanoFraction = subSecondNanos.toString().padStart(9, "0");
  } else {
    // Fall back to millisecond precision from the Date object.
    const ms: number = parsedDate.getMilliseconds();
    nanoFraction = (ms * 1_000_000).toString().padStart(9, "0");
  }
  return `${base}.${nanoFraction}`;
}
}

View File

@@ -26,6 +26,7 @@ The following features have been implemented and removed from this plan:
- **Phase 3.4** - Export to CSV/JSON (Export button in toolbar, LogExport utility with CSV and JSON support)
- **Phase 4.2** - Keyboard Shortcuts (j/k navigation, Enter expand/collapse, Esc close, / focus search, Ctrl+Enter apply filters, ? help)
- **Phase 4.3** - Sensitive Data Scrubbing (LogScrubRule model with PII patterns: Email, CreditCard, SSN, PhoneNumber, IPAddress, custom regex)
- **Phase 5.3** - DateTime64 time column upgrade (DateTime64(9) nanosecond precision, toClickhouseDateTime64 utility, data migration, all ingestion services updated)
## Gap Analysis Summary
@@ -60,30 +61,6 @@ The following features have been implemented and removed from this plan:
These optimizations address fundamental storage and indexing gaps in the telemetry tables that directly impact search speed, data correctness, and operational cost.
### 5.3 Upgrade `time` Column to `DateTime64(9)` (High)
**Current**: The `time` column uses ClickHouse `DateTime` which has **1-second granularity**. Logs within the same second from the same service are stored in arbitrary order. The `timeUnixNano` field (Int128) preserves nanosecond precision but is not in the sort key, so it cannot be used for sub-second ordering.
**Target**: Use `DateTime64(9)` (nanosecond precision) so the sort key naturally orders logs at sub-second resolution.
**Implementation**:
- Change the `time` column type from `TableColumnType.Date` to a new `TableColumnType.DateTime64` in the Log model
- Add `DateTime64` support to `StatementGenerator` and the ClickHouse type mapping in `Statement.ts`
- Update ingestion code in `OtelLogsIngestService.ts` to write DateTime64-compatible timestamps
- Migration: `ALTER TABLE LogItem MODIFY COLUMN time DateTime64(9)` (note: because `DateTime` and `DateTime64` have different on-disk representations, ClickHouse executes this as a data-rewriting mutation rather than a metadata-only change — budget for mutation time on large tables, and verify the ALTER is permitted given that `time` participates in the table's sort key)
- Consider whether `timeUnixNano` column can be deprecated after this change since `time` would carry the same precision
**Impact**: Correct sub-second log ordering. Currently, logs from a burst of activity within the same second may appear in wrong order.
**Files to modify**:
- `Common/Models/AnalyticsModels/Log.ts` (change column type)
- `Common/Types/AnalyticsDatabase/TableColumnType.ts` (add DateTime64 type)
- `Common/Server/Utils/AnalyticsDatabase/Statement.ts` (add DateTime64 mapping)
- `Common/Server/Utils/AnalyticsDatabase/StatementGenerator.ts` (handle DateTime64 in CREATE/SELECT)
- `Telemetry/Services/OtelLogsIngestService.ts` (write DateTime64 timestamps)
- `Worker/DataMigrations/` (new migration)
### 5.7 Add Projections for Histogram Queries (Medium)
**Current**: `projections: []` is empty. Every histogram query (group by time bucket + severity) and facet query scans raw data and performs the aggregation from scratch.
@@ -118,18 +95,16 @@ These optimizations address fundamental storage and indexing gaps in the telemet
| Optimization | Query Pattern Improved | Expected Speedup | Effort |
|-------------|----------------------|-------------------|--------|
| 5.3 DateTime64 time column | Sub-second log ordering | Correctness fix | Medium |
| 5.7 Histogram projections | Histogram and severity aggregation | 5-10x | Medium |
---
## Recommended Remaining Implementation Order
1. **5.3** — DateTime64 upgrade (correctness)
2. **5.7** — Projections (performance polish)
3. **Log-based Metrics** (platform capability)
4. **Data Retention Config UI** (operational)
5. **Log Patterns / ML Clustering** (advanced, larger effort)
1. **5.7** — Projections (performance polish)
2. **Log-based Metrics** (platform capability)
3. **Data Retention Config UI** (operational)
4. **Log Patterns / ML Clustering** (advanced, larger effort)
---

View File

@@ -230,7 +230,7 @@ export default class FluentLogsIngestService extends OtelIngestBaseService {
updatedAt: ingestionDateTime,
projectId: projectId.toString(),
serviceId: serviceMetadata.serviceId.toString(),
time: ingestionDateTime,
time: OneUptimeDate.toClickhouseDateTime64(ingestionDate),
timeUnixNano,
severityNumber: severityInfo.number,
severityText: severityInfo.text,

View File

@@ -330,7 +330,10 @@ export default class OtelLogsIngestService extends OtelIngestBaseService {
const ingestionTimestamp: string =
OneUptimeDate.toClickhouseDateTime(ingestionDate);
const logTimestamp: string =
OneUptimeDate.toClickhouseDateTime(timeDate);
OneUptimeDate.toClickhouseDateTime64(
timeDate,
timeUnixNanoNumeric,
);
const retentionDate: Date = OneUptimeDate.addRemoveDays(
ingestionDate,

View File

@@ -205,7 +205,7 @@ export default class SyslogIngestService extends OtelIngestBaseService {
updatedAt: OneUptimeDate.toClickhouseDateTime(ingestionDate),
projectId: projectId.toString(),
serviceId: serviceMetadata.serviceId.toString(),
time: OneUptimeDate.toClickhouseDateTime(timestamp),
time: OneUptimeDate.toClickhouseDateTime64(timestamp),
timeUnixNano: Math.trunc(
OneUptimeDate.toUnixNano(timestamp),
).toString(),

View File

@@ -0,0 +1,22 @@
import DataMigrationBase from "./DataMigrationBase";
import LogService from "Common/Server/Services/LogService";
export default class ChangeLogTimeColumnToDateTime64 extends DataMigrationBase {
  public constructor() {
    super("ChangeLogTimeColumnToDateTime64");
  }

  /**
   * Builds the ALTER statement that retypes the Log table's `time` column.
   *
   * @param columnType - Target ClickHouse column type.
   * @returns The full ALTER TABLE ... MODIFY COLUMN statement.
   */
  private buildAlterTimeStatement(columnType: string): string {
    return `ALTER TABLE ${LogService.model.tableName} MODIFY COLUMN time ${columnType}`;
  }

  /**
   * Upgrades `time` from DateTime (1-second granularity) to DateTime64(9)
   * so logs can be ordered at nanosecond resolution.
   *
   * NOTE(review): DateTime and DateTime64 differ in on-disk representation,
   * so ClickHouse is expected to run this as a data-rewriting mutation
   * rather than a metadata-only change — confirm mutation duration is
   * acceptable on large tables before rolling out.
   */
  public override async migrate(): Promise<void> {
    await LogService.execute(this.buildAlterTimeStatement("DateTime64(9)"));
  }

  /**
   * Reverts `time` back to plain DateTime. Any sub-second precision written
   * while DateTime64 was active is truncated by this conversion.
   */
  public override async rollback(): Promise<void> {
    await LogService.execute(this.buildAlterTimeStatement("DateTime"));
  }
}

View File

@@ -60,6 +60,7 @@ import AddColumnsToExceptionInstance from "./AddColumnsToExceptionInstance";
import AddRetentionDateAndSkipIndexesToTelemetryTables from "./AddRetentionDateAndSkipIndexesToTelemetryTables";
import AddOtelFieldsToLogTable from "./AddOtelFieldsToLogTable";
import AddSpanTableOptimizations from "./AddSpanTableOptimizations";
import ChangeLogTimeColumnToDateTime64 from "./ChangeLogTimeColumnToDateTime64";
// This is the order in which the migrations will be run. Add new migrations to the end of the array.
@@ -124,6 +125,7 @@ const DataMigrations: Array<DataMigrationBase> = [
new AddRetentionDateAndSkipIndexesToTelemetryTables(),
new AddOtelFieldsToLogTable(),
new AddSpanTableOptimizations(),
new ChangeLogTimeColumnToDateTime64(),
];
export default DataMigrations;