diff --git a/data/logs.sqlite3-shm b/data/logs.sqlite3-shm new file mode 100644 index 0000000..1e26a01 --- /dev/null +++ b/data/logs.sqlite3-shm Binary files differ diff --git a/data/logs.sqlite3-wal b/data/logs.sqlite3-wal new file mode 100644 index 0000000..0c97db1 --- /dev/null +++ b/data/logs.sqlite3-wal Binary files differ diff --git a/public/js/logs.js b/public/js/logs.js index b11051e..a148ec8 100644 --- a/public/js/logs.js +++ b/public/js/logs.js @@ -10,6 +10,7 @@ this.hasMore = true; this.totalPages = 0; this.currentFilters = {}; + this.abortController = null; // For canceling requests this.setupEventListeners(); this.setupPaginationControls(); @@ -21,11 +22,15 @@ this.resetAndLoad(); }); - // Continuous scrolling + // Debounced scroll handler to prevent excessive calls + let scrollTimeout; window.addEventListener("scroll", () => { - if (this.isNearBottom() && !this.isLoading && this.hasMore) { - this.loadNextPage(); - } + if (scrollTimeout) clearTimeout(scrollTimeout); + scrollTimeout = setTimeout(() => { + if (this.isNearBottom() && !this.isLoading && this.hasMore) { + this.loadNextPage(); + } + }, 100); }); } @@ -87,12 +92,20 @@ } async resetAndLoad() { + // Cancel any pending request + if (this.abortController) { + this.abortController.abort(); + } + this.currentPage = 1; this.hasMore = true; - this.theadRow.innerHTML = ""; - this.tbody.innerHTML = ""; this.columns = []; this.currentFilters = new URLSearchParams(new FormData(this.form)); + + // Clear table efficiently + this.theadRow.innerHTML = ""; + this.tbody.innerHTML = ""; + await this.loadLogs(false); } @@ -105,10 +118,18 @@ async goToPage(page) { if (page >= 1 && page <= this.totalPages && page !== this.currentPage) { + // Cancel any pending request + if (this.abortController) { + this.abortController.abort(); + } + this.currentPage = page; + this.columns = []; + + // Clear table efficiently this.theadRow.innerHTML = ""; this.tbody.innerHTML = ""; - this.columns = []; + await 
this.loadLogs(false); } } @@ -119,6 +140,9 @@ this.isLoading = true; this.showLoading(); + // Create new abort controller for this request + this.abortController = new AbortController(); + const params = new URLSearchParams(this.currentFilters); params.append("page", this.currentPage); params.append("limit", this.limit); @@ -131,6 +155,7 @@ Accept: "application/json", }, body: params.toString(), + signal: this.abortController.signal, }); if (!res.ok) throw new Error(await res.text()); @@ -152,6 +177,11 @@ data.pagination.total ); } catch (error) { + // Don't show error if request was aborted + if (error.name === "AbortError") { + return; + } + const errorMsg = `
${value
- .replace(//g, ">")}`;
+ // Create pre element for stack traces
+ const pre = document.createElement("pre");
+ pre.textContent = value; // textContent automatically escapes
+ td.appendChild(pre);
+ } else if (col === "timestamp") {
+ // Format timestamp nicely
+ td.textContent = value ? new Date(value).toLocaleString() : "";
+ } else if (typeof value === "object" && value !== null) {
+ // Handle objects/arrays
+ td.textContent = JSON.stringify(value, null, 2);
+ td.style.fontFamily = "monospace";
} else {
td.textContent = value ?? "";
}
+
tr.appendChild(td);
}
- this.tbody.appendChild(tr);
+
+ fragment.appendChild(tr);
}
+
+ // Single DOM append operation
+ this.tbody.appendChild(fragment);
+ }
+
+ updateHeaders() {
+ // Clear and rebuild headers efficiently
+ this.theadRow.innerHTML = "";
+
+ // Use DocumentFragment for headers too
+ const fragment = document.createDocumentFragment();
+
+ for (const col of this.columns) {
+ const th = document.createElement("th");
+ th.textContent = col;
+
+ // Add some basic styling for better UX
+ if (col === "timestamp") {
+ th.style.minWidth = "150px";
+ } else if (col === "level") {
+ th.style.width = "80px";
+ } else if (col === "stack") {
+ th.style.width = "300px";
+ }
+
+ fragment.appendChild(th);
+ }
+
+ this.theadRow.appendChild(fragment);
}
updatePaginationInfo(page, totalPages, total) {
@@ -219,16 +287,22 @@
firstBtn.disabled = page <= 1;
prevBtn.disabled = page <= 1;
- nextBtn.disabled = page >= totalPages;
- lastBtn.disabled = page >= totalPages;
+ nextBtn.disabled = page >= totalPages || totalPages === 0;
+ lastBtn.disabled = page >= totalPages || totalPages === 0;
}
showLoading() {
- document.getElementById("loadingIndicator").style.display = "block";
+ const indicator = document.getElementById("loadingIndicator");
+ if (indicator) {
+ indicator.style.display = "block";
+ }
}
hideLoading() {
- document.getElementById("loadingIndicator").style.display = "none";
+ const indicator = document.getElementById("loadingIndicator");
+ if (indicator) {
+ indicator.style.display = "none";
+ }
}
}
diff --git a/src/routes/logs.js b/src/routes/logs.js
index ddec110..ce5a233 100644
--- a/src/routes/logs.js
+++ b/src/routes/logs.js
@@ -23,12 +23,16 @@
});
router.post("/logs", secured, (req, res) => {
+ const start = process.hrtime.bigint();
+
const log_level = req.query.log_level || "*";
const date = req.query.date || "*";
const page = parseInt(req.query.page) || 1;
const limit = parseInt(req.query.limit) || 50;
const offset = (page - 1) * limit;
+ const parseStart = process.hrtime.bigint();
+
if (log_level !== "*" && !allowedLevels.includes(log_level)) {
return res.status(400).json({ error: "Invalid log_level" });
}
@@ -37,12 +41,12 @@
const params = [];
if (log_level !== "*") {
- conditions.push("l.level = ?");
+ conditions.push("level = ?");
params.push(log_level);
}
if (date !== "*") {
- conditions.push("date(l.timestamp) = ?");
+ conditions.push("date(timestamp) = ?");
params.push(date);
}
@@ -50,55 +54,81 @@
? "WHERE " + conditions.join(" AND ")
: "";
- // Get total count for pagination
- const countQuery = `
- SELECT COUNT(DISTINCT l.id) as total
- FROM logs l
- ${whereClause}
- `;
+ const countStart = process.hrtime.bigint();
+ // Count query - simple and fast
+ const countQuery = `SELECT COUNT(*) as total FROM logs ${whereClause}`;
const totalResult = db.prepare(countQuery).get(...params);
const total = totalResult.total;
- // Get paginated results
- const query = `
- SELECT
- l.id,
- l.timestamp,
- l.level,
- GROUP_CONCAT(k.key || '=' || m.value, '||') AS meta_kv
- FROM logs l
- LEFT JOIN log_metadata m ON m.log_id = l.id
- LEFT JOIN keys k ON k.id = m.key_id
+ const queryStart = process.hrtime.bigint();
+
+ // STEP 1: Get just the log records we need (fast!)
+ const logQuery = `
+ SELECT id, timestamp, level
+ FROM logs
${whereClause}
- GROUP BY l.id
- ORDER BY l.timestamp DESC
+ ORDER BY timestamp DESC
LIMIT ? OFFSET ?
`;
try {
- const rows = db.prepare(query).all(...params, limit, offset);
+ const logRows = db.prepare(logQuery).all(...params, limit, offset);
- const logs = rows.map((row) => {
- const meta = {};
- if (row.meta_kv) {
- for (const pair of row.meta_kv.split("||")) {
- const [k, v] = pair.split("=");
- if (k && v !== undefined) {
- try {
- meta[k] = JSON.parse(v);
- } catch {
- meta[k] = v;
- }
- }
- }
+ if (logRows.length === 0) {
+ return res.json({
+ logs: [],
+ pagination: { page, limit, total, totalPages: 0, hasMore: false },
+ });
+ }
+
+ // STEP 2: Get metadata only for these specific logs
+ const logIds = logRows.map((row) => row.id);
+ const placeholders = logIds.map(() => "?").join(",");
+
+ const metadataQuery = `
+ SELECT
+ m.log_id,
+ k.key,
+ m.value
+ FROM log_metadata m
+ JOIN keys k ON k.id = m.key_id
+ WHERE m.log_id IN (${placeholders})
+ `;
+
+ const metadataRows = db.prepare(metadataQuery).all(...logIds);
+
+ const mapStart = process.hrtime.bigint();
+
+ // STEP 3: Build metadata lookup map
+ const metadataMap = {};
+ metadataRows.forEach((row) => {
+ if (!metadataMap[row.log_id]) {
+ metadataMap[row.log_id] = {};
}
- return {
- id: row.id,
- timestamp: row.timestamp,
- level: row.level,
- ...meta,
- };
+ try {
+ metadataMap[row.log_id][row.key] = JSON.parse(row.value);
+ } catch {
+ metadataMap[row.log_id][row.key] = row.value;
+ }
+ });
+
+ // STEP 4: Combine logs with their metadata
+ const logs = logRows.map((row) => ({
+ id: row.id,
+ timestamp: row.timestamp,
+ level: row.level,
+ ...(metadataMap[row.id] || {}),
+ }));
+
+ const end = process.hrtime.bigint();
+
+ req.log.info("logs route timings", {
+ totalMs: Number(end - start) / 1e6,
+ parseMs: Number(parseStart - start) / 1e6,
+ countMs: Number(queryStart - countStart) / 1e6,
+ queryMs: Number(mapStart - queryStart) / 1e6,
+ mapMs: Number(end - mapStart) / 1e6,
});
res.json({
@@ -111,7 +141,8 @@
hasMore: page < Math.ceil(total / limit),
},
});
- } catch {
+ } catch (error) {
+ console.error("Query error:", error);
res.status(500).json({ error: "Failed to query logs" });
}
});
diff --git a/src/utils/SQLiteTransport.js b/src/utils/SQLiteTransport.js
index 69c797b..095610a 100644
--- a/src/utils/SQLiteTransport.js
+++ b/src/utils/SQLiteTransport.js
@@ -25,13 +25,19 @@
FOREIGN KEY(log_id) REFERENCES logs(id) ON DELETE CASCADE,
FOREIGN KEY(key_id) REFERENCES keys(id) ON DELETE CASCADE
);
- CREATE INDEX IF NOT EXISTS idx_logs_timestamp ON logs(timestamp);
- CREATE INDEX IF NOT EXISTS idx_logs_level ON logs(level);
- CREATE INDEX IF NOT EXISTS idx_log_metadata_logid_keyid ON log_metadata(log_id, key_id);
- CREATE INDEX IF NOT EXISTS idx_log_metadata_keyid_value ON log_metadata(key_id, value);
- CREATE UNIQUE INDEX IF NOT EXISTS idx_keys_key ON keys(key);
+
+ -- Indexes matched to the query patterns: timestamp ordering, level+timestamp filtering, and per-log metadata lookups
+ CREATE INDEX IF NOT EXISTS idx_logs_timestamp_desc ON logs(timestamp DESC);
+ CREATE INDEX IF NOT EXISTS idx_logs_level_timestamp ON logs(level, timestamp DESC);
+ CREATE INDEX IF NOT EXISTS idx_log_metadata_log_id ON log_metadata(log_id);
+ CREATE INDEX IF NOT EXISTS idx_keys_key ON keys(key);
`);
+ // Configure SQLite pragmas for better performance
+ this.db.pragma("journal_mode = WAL");
+ this.db.pragma("synchronous = NORMAL");
+ this.db.pragma("cache_size = 10000");
+
this.insertLog = this.db.prepare(
`INSERT INTO logs (timestamp, level) VALUES (?, ?)`
);
@@ -51,7 +57,6 @@
return row.id;
}
- // Helper function to safely stringify values
safeStringify(value) {
if (value === null || value === undefined) {
return "";
@@ -83,7 +88,6 @@
const result = this.insertLog.run(timestamp, level);
const logId = result.lastInsertRowid;
- // Store message
if (message) {
const messageKeyId = this.getOrCreateKeyId("message");
this.insertMetadata.run(
@@ -93,7 +97,6 @@
);
}
- // Store all metadata
for (const [key, value] of Object.entries(meta)) {
const keyId = this.getOrCreateKeyId(key);
this.insertMetadata.run(logId, keyId, this.safeStringify(value));
@@ -104,7 +107,6 @@
insertLogTxn();
} catch (error) {
console.error("SQLite logging error:", error);
- // Don't fail silently - this could hide important issues
}
callback();