diff --git a/public/js/logs.js b/public/js/logs.js
index e92c9a0..07cb673 100644
--- a/public/js/logs.js
+++ b/public/js/logs.js
@@ -1,6 +1,7 @@
class LogsViewer {
constructor() {
this.form = document.getElementById("filterForm");
+ this.log_path = document.getElementById("log_path");
this.theadRow = document.getElementById("logsTableHeaderRow");
this.tbody = document.querySelector("#logsTable tbody");
this.currentPage = 1;
@@ -148,7 +149,7 @@
params.append("limit", this.limit);
try {
- const res = await fetch("/admin/logs", {
+ const res = await fetch(this.log_path.value, {
method: "POST",
headers: {
"Content-Type": "application/x-www-form-urlencoded",
@@ -164,6 +165,8 @@
this.hasMore = data.pagination.hasMore;
this.totalPages = data.pagination.totalPages;
+
+
if (data.logs.length === 0 && !append) {
this.tbody.innerHTML =
'<tr><td colspan="100%">No logs found</td></tr>';
this.updatePaginationInfo(0, 0, 0);
diff --git a/src/middleware/analytics.js b/src/middleware/analytics.js
index f80ddd1..57bcb9b 100644
--- a/src/middleware/analytics.js
+++ b/src/middleware/analytics.js
@@ -2,20 +2,17 @@
module.exports = (req, res, next) => {
if (req.method === "GET" && req.accepts("html")) {
- const ip = req.ip;
- // const ip =
- // req.headers["x-forwarded-for"]?.split(",")[0] ||
- // req.connection.remoteAddress ||
- // "";
+ const forwardedIp = req.ip;
+ const directIp = req.connection.remoteAddress;
const timestamp = Date.now();
const url = req.originalUrl;
const referrer = req.get("Referer") || "";
const userAgent = req.get("User-Agent") || "";
db.run(
- `INSERT INTO analytics (timestamp, url, referrer, user_agent, js_enabled, ip)
- VALUES (?, ?, ?, ?, ?, ?)`,
- [timestamp, url, referrer, userAgent, 0, ip]
+ `INSERT INTO analytics (timestamp, url, referrer, user_agent, js_enabled, forwardedIp, directIp)
+ VALUES (?, ?, ?, ?, ?, ?, ?)`,
+ [timestamp, url, referrer, userAgent, 0, forwardedIp, directIp]
);
}
next();
diff --git a/src/routes/admin/dskManager.js b/src/routes/admin/dskManager.js
deleted file mode 100644
index 61471c1..0000000
--- a/src/routes/admin/dskManager.js
+++ /dev/null
@@ -1,128 +0,0 @@
-// routes/admin.js
-const express = require("express");
-const { diskSpaceMonitor } = require("../utils/logging");
-
-const router = express.Router();
-
-// Middleware to check admin authentication (implement as needed)
-const requireAdmin = (req, res, next) => {
- // Add your admin authentication logic here
- // For example, check session, JWT token, etc.
- if (req.session && req.session.isAdmin) {
- next();
- } else {
- res.status(403).json({ error: "Admin access required" });
- }
-};
-
-// Apply admin middleware to all routes
-router.use(requireAdmin);
-
-// Apply disk space monitoring middleware
-router.use(diskSpaceMonitor.adminNotificationMiddleware());
-
-// Get disk space status
-router.get("/disk-space/status", diskSpaceMonitor.getStatusEndpoint());
-
-// Perform manual cleanup
-router.post("/disk-space/cleanup", diskSpaceMonitor.manualCleanupEndpoint());
-
-// Get disk space configuration
-router.get("/disk-space/config", (req, res) => {
- res.json({
- success: true,
- data: {
- thresholds: {
- warning: diskSpaceMonitor.options.warningThreshold,
- critical: diskSpaceMonitor.options.criticalThreshold,
- emergency: diskSpaceMonitor.options.emergencyThreshold,
- },
- cleanup: {
- normalCleanupDays: diskSpaceMonitor.options.normalCleanupDays,
- warningCleanupDays: diskSpaceMonitor.options.warningCleanupDays,
- criticalCleanupDays: diskSpaceMonitor.options.criticalCleanupDays,
- emergencyCleanupDays: diskSpaceMonitor.options.emergencyCleanupDays,
- },
- monitoring: {
- interval: diskSpaceMonitor.options.monitoringInterval,
- maxLogDirectorySize: diskSpaceMonitor.options.maxLogDirectorySize,
- },
- },
- });
-});
-
-// Update disk space configuration
-router.put("/disk-space/config", (req, res) => {
- try {
- const { thresholds, cleanup, monitoring } = req.body;
-
- if (thresholds) {
- Object.assign(diskSpaceMonitor.options, thresholds);
- }
-
- if (cleanup) {
- Object.assign(diskSpaceMonitor.options, cleanup);
- }
-
- if (monitoring) {
- Object.assign(diskSpaceMonitor.options, monitoring);
- // Restart monitoring with new interval
- diskSpaceMonitor.startMonitoring();
- }
-
- res.json({
- success: true,
- message: "Configuration updated successfully",
- data: diskSpaceMonitor.options,
- });
- } catch (error) {
- res.status(500).json({
- success: false,
- error: "Failed to update configuration",
- details: error.message,
- });
- }
-});
-
-// Get log directory contents
-router.get("/logs/directory", async (req, res) => {
- try {
- const fs = require("fs").promises;
- const path = require("path");
-
- const logDir = path.join(__dirname, "..", "..", "logs");
- const getDirectoryInfo = async (dir) => {
- const items = await fs.readdir(dir);
- const info = [];
-
- for (const item of items) {
- const itemPath = path.join(dir, item);
- const stats = await fs.stat(itemPath);
-
- info.push({
- name: item,
- type: stats.isDirectory() ? "directory" : "file",
- size: stats.size,
- modified: stats.mtime,
- relativePath: path.relative(logDir, itemPath),
- });
- }
-
- return info.sort((a, b) => b.modified - a.modified);
- };
-
- const contents = await getDirectoryInfo(logDir);
- res.json({
- success: true,
- data: contents,
- });
- } catch (error) {
- res.status(500).json({
- success: false,
- error: "Failed to get log directory contents",
- details: error.message,
- });
- }
-});
-
-module.exports = router;
diff --git a/src/routes/admin/dskMonitor.js b/src/routes/admin/dskMonitor.js
new file mode 100644
index 0000000..61471c1
--- /dev/null
+++ b/src/routes/admin/dskMonitor.js
@@ -0,0 +1,128 @@
+// routes/admin.js
+const express = require("express");
+const { diskSpaceMonitor } = require("../../utils/logging");
+
+const router = express.Router();
+
+// Middleware to check admin authentication (implement as needed)
+const requireAdmin = (req, res, next) => {
+ // Add your admin authentication logic here
+ // For example, check session, JWT token, etc.
+ if (req.session && req.session.isAdmin) {
+ next();
+ } else {
+ res.status(403).json({ error: "Admin access required" });
+ }
+};
+
+// Apply admin middleware to all routes
+router.use(requireAdmin);
+
+// Apply disk space monitoring middleware
+router.use(diskSpaceMonitor.adminNotificationMiddleware());
+
+// Get disk space status
+router.get("/disk-space/status", diskSpaceMonitor.getStatusEndpoint());
+
+// Perform manual cleanup
+router.post("/disk-space/cleanup", diskSpaceMonitor.manualCleanupEndpoint());
+
+// Get disk space configuration
+router.get("/disk-space/config", (req, res) => {
+ res.json({
+ success: true,
+ data: {
+ thresholds: {
+ warning: diskSpaceMonitor.options.warningThreshold,
+ critical: diskSpaceMonitor.options.criticalThreshold,
+ emergency: diskSpaceMonitor.options.emergencyThreshold,
+ },
+ cleanup: {
+ normalCleanupDays: diskSpaceMonitor.options.normalCleanupDays,
+ warningCleanupDays: diskSpaceMonitor.options.warningCleanupDays,
+ criticalCleanupDays: diskSpaceMonitor.options.criticalCleanupDays,
+ emergencyCleanupDays: diskSpaceMonitor.options.emergencyCleanupDays,
+ },
+ monitoring: {
+ interval: diskSpaceMonitor.options.monitoringInterval,
+ maxLogDirectorySize: diskSpaceMonitor.options.maxLogDirectorySize,
+ },
+ },
+ });
+});
+
+// Update disk space configuration
+router.put("/disk-space/config", (req, res) => {
+ try {
+ const { thresholds, cleanup, monitoring } = req.body;
+
+ if (thresholds) {
+ Object.assign(diskSpaceMonitor.options, thresholds);
+ }
+
+ if (cleanup) {
+ Object.assign(diskSpaceMonitor.options, cleanup);
+ }
+
+ if (monitoring) {
+ Object.assign(diskSpaceMonitor.options, monitoring);
+ // Restart monitoring with new interval
+ diskSpaceMonitor.startMonitoring();
+ }
+
+ res.json({
+ success: true,
+ message: "Configuration updated successfully",
+ data: diskSpaceMonitor.options,
+ });
+ } catch (error) {
+ res.status(500).json({
+ success: false,
+ error: "Failed to update configuration",
+ details: error.message,
+ });
+ }
+});
+
+// Get log directory contents
+router.get("/logs/directory", async (req, res) => {
+ try {
+ const fs = require("fs").promises;
+ const path = require("path");
+
+ const logDir = path.join(__dirname, "..", "..", "logs");
+ const getDirectoryInfo = async (dir) => {
+ const items = await fs.readdir(dir);
+ const info = [];
+
+ for (const item of items) {
+ const itemPath = path.join(dir, item);
+ const stats = await fs.stat(itemPath);
+
+ info.push({
+ name: item,
+ type: stats.isDirectory() ? "directory" : "file",
+ size: stats.size,
+ modified: stats.mtime,
+ relativePath: path.relative(logDir, itemPath),
+ });
+ }
+
+ return info.sort((a, b) => b.modified - a.modified);
+ };
+
+ const contents = await getDirectoryInfo(logDir);
+ res.json({
+ success: true,
+ data: contents,
+ });
+ } catch (error) {
+ res.status(500).json({
+ success: false,
+ error: "Failed to get log directory contents",
+ details: error.message,
+ });
+ }
+});
+
+module.exports = router;
diff --git a/src/routes/analytics.js b/src/routes/analytics.js
index ad7e01f..6c8f12a 100644
--- a/src/routes/analytics.js
+++ b/src/routes/analytics.js
@@ -11,18 +11,25 @@
event = "",
} = req.body;
- // const ip =
- // req.headers["x-forwarded-for"]?.split(",")[0] ||
- // req.connection.remoteAddress ||
- // "";
- const ip = req.ip;
+ const forwardedIp = req.ip;
+ const directIp = req.connection.remoteAddress;
const timestamp = Date.now();
db.run(
- `INSERT INTO analytics (timestamp, url, referrer, user_agent, viewport, load_time, event, ip, js_enabled)
- VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
- [timestamp, url, referrer, userAgent, viewport, loadTime, event, ip, 1]
+ `INSERT INTO analytics (timestamp, url, referrer, user_agent, viewport, load_time, event, forwardedIp, directIp, js_enabled)
+ VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+ [
+ timestamp,
+ url,
+ referrer,
+ userAgent,
+ viewport,
+ loadTime,
+ event,
+ forwardedIp,
+ directIp,
+ 1,
+ ]
);
- // res.send("Tracked");
res.sendStatus(204);
};
diff --git a/src/routes/secured/logs.js b/src/routes/secured/logs.js
index 85bbac0..f762e5b 100644
--- a/src/routes/secured/logs.js
+++ b/src/routes/secured/logs.js
@@ -6,13 +6,14 @@
const allowedLevels = ["warn", "error", "info", "debug", "functions", "notice"];
-const dbPath = path.resolve(__dirname, "../../../data/logs.sqlite3");
+const logsDbPath = path.resolve(__dirname, "../../../data/logs.sqlite3");
-if (!fs.existsSync(dbPath)) {
- fs.closeSync(fs.openSync(dbPath, "w"));
+if (!fs.existsSync(logsDbPath)) {
+ fs.closeSync(fs.openSync(logsDbPath, "w"));
}
-const db = new Database(dbPath, { readonly: true });
+const logsDb = new Database(logsDbPath, { readonly: true });
+const analyticsDb = require("../../utils/sqlite3");
router.get("/logs", (req, res) => {
res.renderWithBaseContext("admin-pages/logs", {
@@ -22,16 +23,12 @@
});
router.post("/logs", (req, res) => {
- const start = process.hrtime.bigint();
-
const log_level = req.query.log_level || "*";
const date = req.query.date || "*";
const page = parseInt(req.query.page) || 1;
const limit = parseInt(req.query.limit) || 50;
const offset = (page - 1) * limit;
- const parseStart = process.hrtime.bigint();
-
if (log_level !== "*" && !allowedLevels.includes(log_level)) {
return res.status(400).json({ error: "Invalid log_level" });
}
@@ -53,15 +50,11 @@
? "WHERE " + conditions.join(" AND ")
: "";
- const countStart = process.hrtime.bigint();
-
// Count query - simple and fast
const countQuery = `SELECT COUNT(*) as total FROM logs ${whereClause}`;
- const totalResult = db.prepare(countQuery).get(...params);
+ const totalResult = logsDb.prepare(countQuery).get(...params);
const total = totalResult.total;
- const queryStart = process.hrtime.bigint();
-
// STEP 1: Get just the log records we need (fast!)
const logQuery = `
SELECT id, timestamp, level
@@ -72,7 +65,7 @@
`;
try {
- const logRows = db.prepare(logQuery).all(...params, limit, offset);
+ const logRows = logsDb.prepare(logQuery).all(...params, limit, offset);
if (logRows.length === 0) {
return res.json({
@@ -95,9 +88,7 @@
WHERE m.log_id IN (${placeholders})
`;
- const metadataRows = db.prepare(metadataQuery).all(...logIds);
-
- const mapStart = process.hrtime.bigint();
+ const metadataRows = logsDb.prepare(metadataQuery).all(...logIds);
// STEP 3: Build metadata lookup map
const metadataMap = {};
@@ -120,16 +111,6 @@
...(metadataMap[row.id] || {}),
}));
- const end = process.hrtime.bigint();
-
- req.log.info("logs route timings", {
- totalMs: Number(end - start) / 1e6,
- parseMs: Number(parseStart - start) / 1e6,
- countMs: Number(queryStart - countStart) / 1e6,
- queryMs: Number(mapStart - queryStart) / 1e6,
- mapMs: Number(end - mapStart) / 1e6,
- });
-
res.json({
logs,
pagination: {
@@ -145,5 +126,93 @@
res.status(500).json({ error: "Failed to query logs" });
}
});
+router.post("/logs/analytics", (req, res) => {
+ const event = req.query.event || "*";
+ const date = req.query.date || "*";
+ const page = parseInt(req.query.page) || 1;
+ const limit = parseInt(req.query.limit) || 50;
+ const offset = (page - 1) * limit;
+
+ if (page < 1 || limit < 1) {
+ return res.status(400).json({ error: "Invalid pagination parameters" });
+ }
+
+ const conditions = [];
+ const params = [];
+
+ // Uncomment and modify these when you want to add filters
+ // if (event !== "*") {
+ // conditions.push("event = ?");
+ // params.push(event);
+ // }
+ // if (date !== "*") {
+ // conditions.push("date(timestamp_human) = ?");
+ // params.push(date);
+ // }
+
+ const whereClause = conditions.length
+ ? "WHERE " + conditions.join(" AND ")
+ : "";
+
+ try {
+ // Count total matching rows
+ const countQuery = `SELECT COUNT(*) as total FROM analytics_view ${whereClause}`;
+
+ analyticsDb.get(countQuery, params, (err, totalResult) => {
+ if (err) {
+ console.error("Count query error:", err);
+ return res.status(500).json({ error: "Failed to query logs" });
+ }
+
+ const total = totalResult.total;
+
+ // Query logs with pagination
+ const logsQuery = `
+ SELECT
+ id,
+ timestamp_human AS timestamp,
+ url,
+ referrer,
+ user_agent,
+ viewport,
+ load_time,
+ event,
+ forwardedIp,
+ directIp,
+ js_enabled
+ FROM analytics_view
+ ${whereClause}
+ ORDER BY timestamp DESC
+ LIMIT ? OFFSET ?
+ `;
+
+ const queryParams = [...params, limit, offset];
+
+ analyticsDb.all(logsQuery, queryParams, (err, logs) => {
+ if (err) {
+ console.error("Logs query error:", err);
+ return res.status(500).json({ error: "Failed to query logs" });
+ }
+
+ const totalPages = Math.ceil(total / limit);
+
+
+ res.json({
+ logs,
+ pagination: {
+ page,
+ limit,
+ total,
+ totalPages,
+ hasMore: page < totalPages,
+ },
+ });
+ });
+ });
+ } catch (error) {
+ console.error("Query error:", error);
+ res.status(500).json({ error: "Failed to query logs" });
+ }
+});
module.exports = router;
diff --git a/src/utils/logging.js b/src/utils/logging.js
index f21de6d..6a54266 100644
--- a/src/utils/logging.js
+++ b/src/utils/logging.js
@@ -203,9 +203,6 @@
],
});
-// Log session start
-winstonLogger.info(`Session started: ${sessionTimestamp}`);
-
// Clean up old session directories (optional)
function cleanupOldSessions() {
const sessionsDir = path.join(logDir, "sessions");
diff --git a/src/utils/sqlite3.js b/src/utils/sqlite3.js
index 0b0c3eb..411ad04 100644
--- a/src/utils/sqlite3.js
+++ b/src/utils/sqlite3.js
@@ -1,6 +1,6 @@
const sqlite3 = require("sqlite3").verbose();
-const db = new sqlite3.Database("./data/analytics.sqlite3");
+const db = new sqlite3.Database("./data/analytics2.sqlite3");
db.run(`
CREATE TABLE IF NOT EXISTS analytics (
id INTEGER PRIMARY KEY AUTOINCREMENT,
@@ -11,7 +11,8 @@
viewport TEXT,
load_time REAL,
event TEXT,
- ip TEXT,
+ forwardedIp TEXT,
+ directIp TEXT,
js_enabled INTEGER
)
`);
@@ -26,7 +27,8 @@
viewport,
load_time,
event,
- ip,
+ forwardedIp,
+ directIp,
js_enabled
FROM analytics;`);
module.exports = db;
diff --git a/src/utils/structuredLogger.js b/src/utils/structuredLogger.js
index eff3ea4..62a7b9e 100644
--- a/src/utils/structuredLogger.js
+++ b/src/utils/structuredLogger.js
@@ -4,8 +4,6 @@
const start = process.hrtime();
res.on("finish", () => {
- const [s, ns] = process.hrtime(start);
- const ms = (s * 1e3 + ns / 1e6).toFixed(3);
const { method, url, headers, query, body, ip, connection } = req;
const { statusCode } = res;
@@ -33,7 +31,6 @@
statusCode: String(statusCode),
directIp: String(connection.remoteAddress),
forwardedIp: String(ip),
- responseTime: `${ms}ms`,
contentLength: String(res.getHeader("content-length") || "0"),
...flatten(headers, "headers"),
...flatten(query, "query"),
diff --git a/src/views/admin-pages/logs.handlebars b/src/views/admin-pages/logs.handlebars
index 7ead8ea..1629cbd 100644
--- a/src/views/admin-pages/logs.handlebars
+++ b/src/views/admin-pages/logs.handlebars
@@ -18,6 +18,12 @@
--}}
+
+
+