Compare commits

...

2 Commits

Author SHA1 Message Date
Your Name
2ed4b96058 v1.1.8 - Add comprehensive database query logging with timing at debug level 3, fix schema version compatibility (v6-v9), add version logging at startup, allow monitoring throttle=0 to disable monitoring 2026-02-02 11:20:11 -04:00
Your Name
c0051b22be v1.1.7 - Add per-connection database query tracking for abuse detection
Implemented comprehensive database query tracking to identify clients causing
high CPU usage through excessive database queries. The relay now tracks and
displays query statistics per WebSocket connection in the admin UI.

Features Added:
- Track db_queries_executed and db_rows_returned per connection
- Calculate query rate (queries/minute) and row rate (rows/minute)
- Display stats in admin UI grouped by IP address and WebSocket
- Show: IP, Subscriptions, Queries, Rows, Query Rate, Duration

Implementation:
- Added tracking fields to per_session_data structure
- Increment counters in handle_req_message() and handle_count_message()
- Extract stats from pss in query_subscription_details()
- Updated admin UI to display IP address and query metrics

Use Case:
Admins can now identify abusive clients by monitoring:
- High query rates (>50 queries/min indicates polling abuse)
- High row counts (>10K rows/min indicates broad filter abuse)
- Query patterns (high queries + low rows = targeted, high both = crawler)

This enables informed decisions about which IPs to blacklist based on
actual resource consumption rather than just connection count.
2026-02-01 16:26:37 -04:00
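The thresholds quoted in the Use Case above can be turned into a per-connection check along these lines. This is a minimal sketch only, not code from this compare: the function name classify_connection and the macro names are hypothetical, while the counters mirror the db_queries_executed, db_rows_returned, and query_tracking_start fields added to per_session_data in the last hunk.

#include <time.h>

/* Illustrative thresholds taken from the commit message; the relay itself only
 * reports the raw counters and leaves the judgement to the admin. */
#define ABUSE_QUERY_RATE_PER_MIN 50.0
#define ABUSE_ROW_RATE_PER_MIN   10000.0

/* Classify a connection from the counters tracked in per_session_data.
 * tracking_start corresponds to query_tracking_start in the header diff. */
static const char* classify_connection(int queries_executed, int rows_returned,
                                       time_t tracking_start, time_t now) {
    double minutes = difftime(now, tracking_start) / 60.0;
    if (minutes <= 0.0) {
        return "too new to judge";
    }
    double query_rate = queries_executed / minutes;
    double row_rate   = rows_returned / minutes;
    if (query_rate > ABUSE_QUERY_RATE_PER_MIN && row_rate > ABUSE_ROW_RATE_PER_MIN) {
        return "crawler (broad, high-volume filters)";
    }
    if (query_rate > ABUSE_QUERY_RATE_PER_MIN) {
        return "polling abuse (many targeted queries)";
    }
    if (row_rate > ABUSE_ROW_RATE_PER_MIN) {
        return "broad filter abuse (few queries, huge result sets)";
    }
    return "normal";
}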
9 changed files with 246 additions and 22 deletions

View File

@@ -107,13 +107,13 @@ COPY Makefile /build/Makefile
# Disable fortification to avoid __*_chk symbols that don't exist in MUSL
# Use conditional compilation flags based on DEBUG_BUILD argument
RUN if [ "$DEBUG_BUILD" = "true" ]; then \
CFLAGS="-g -O0 -DDEBUG"; \
STRIP_CMD=""; \
echo "Building with DEBUG symbols enabled"; \
CFLAGS="-g -O2 -DDEBUG"; \
STRIP_CMD="echo 'Keeping debug symbols'"; \
echo "Building with DEBUG symbols enabled (optimized with -O2)"; \
else \
CFLAGS="-O2"; \
STRIP_CMD="strip /build/c_relay_static"; \
echo "Building optimized production binary"; \
echo "Building optimized production binary (symbols stripped)"; \
fi && \
gcc -static $CFLAGS -Wall -Wextra -std=c99 \
-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 \

View File

@@ -4324,6 +4324,12 @@ function populateSubscriptionDetailsTable(subscriptionsData) {
const oldestDuration = Math.max(...subscriptions.map(s => now - s.created_at));
const oldestDurationStr = formatDuration(oldestDuration);
// Calculate total query stats for this connection
const totalQueries = subscriptions.reduce((sum, s) => sum + (s.db_queries_executed || 0), 0);
const totalRows = subscriptions.reduce((sum, s) => sum + (s.db_rows_returned || 0), 0);
const avgQueryRate = subscriptions.length > 0 ? (subscriptions[0].query_rate_per_min || 0) : 0;
const clientIp = subscriptions.length > 0 ? (subscriptions[0].client_ip || 'unknown') : 'unknown';
// Create header row (summary)
const headerRow = document.createElement('tr');
headerRow.className = 'subscription-group-header';
@@ -4334,9 +4340,14 @@ function populateSubscriptionDetailsTable(subscriptionsData) {
headerRow.innerHTML = `
<td colspan="4" style="padding: 8px;">
<span class="expand-icon" style="display: inline-block; width: 20px; transition: transform 0.2s;">▶</span>
<strong style="font-family: 'Courier New', monospace; font-size: 12px;">Websocket: ${wsiPointer}</strong>
<span style="color: #666; margin-left: 15px;">
Subscriptions: ${subCount} | Oldest: ${oldestDurationStr}
<strong style="font-family: 'Courier New', monospace; font-size: 12px;">IP: ${clientIp}</strong>
<span style="color: #666; margin-left: 10px; font-size: 11px;">
WS: ${wsiPointer} |
Subs: ${subCount} |
Queries: ${totalQueries.toLocaleString()} |
Rows: ${totalRows.toLocaleString()} |
Rate: ${avgQueryRate.toFixed(1)} q/min |
Duration: ${oldestDurationStr}
</span>
</td>
`;

View File

@@ -1 +1 @@
1979749
2211317

View File

@@ -1,6 +1,11 @@
// Define _GNU_SOURCE to ensure all POSIX features are available
#define _GNU_SOURCE
// Forward declaration for query logging (defined in main.c)
extern void log_query_execution(const char* query_type, const char* sub_id,
const char* client_ip, const char* sql,
long elapsed_us, int rows_returned);
// API module for serving embedded web content and admin API functions
#include <stdio.h>
#include <stdlib.h>
@@ -66,6 +71,10 @@ cJSON* query_event_kind_distribution(void) {
sqlite3_stmt* stmt;
const char* sql = "SELECT kind, COUNT(*) as count FROM events GROUP BY kind ORDER BY count DESC";
// Start timing
struct timespec query_start, query_end;
clock_gettime(CLOCK_MONOTONIC, &query_start);
if (sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL) != SQLITE_OK) {
DEBUG_ERROR("Failed to prepare event kind distribution query");
return NULL;
@@ -77,8 +86,10 @@ cJSON* query_event_kind_distribution(void) {
cJSON* kinds_array = cJSON_CreateArray();
long long total_events = 0;
int row_count = 0;
while (sqlite3_step(stmt) == SQLITE_ROW) {
row_count++;
int kind = sqlite3_column_int(stmt, 0);
long long count = sqlite3_column_int64(stmt, 1);
total_events += count;
@@ -90,6 +101,13 @@ cJSON* query_event_kind_distribution(void) {
}
sqlite3_finalize(stmt);
// Stop timing and log
clock_gettime(CLOCK_MONOTONIC, &query_end);
long elapsed_us = (query_end.tv_sec - query_start.tv_sec) * 1000000L +
(query_end.tv_nsec - query_start.tv_nsec) / 1000L;
log_query_execution("MONITOR", "event_kinds", NULL, sql, elapsed_us, row_count);
cJSON_AddNumberToObject(distribution, "total_events", total_events);
cJSON_AddItemToObject(distribution, "kinds", kinds_array);
@@ -245,6 +263,10 @@ cJSON* query_subscription_details(void) {
DEBUG_LOG("=== SUBSCRIPTION_DETAILS QUERY DEBUG ===");
DEBUG_LOG("Query: %s", sql);
// Start timing
struct timespec query_start, query_end;
clock_gettime(CLOCK_MONOTONIC, &query_start);
if (sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL) != SQLITE_OK) {
DEBUG_ERROR("Failed to prepare subscription details query");
return NULL;
@@ -287,6 +309,46 @@ cJSON* query_subscription_details(void) {
cJSON_AddBoolToObject(sub_obj, "active", 1); // All from this view are active
cJSON_AddStringToObject(sub_obj, "wsi_pointer", wsi_pointer ? wsi_pointer : "N/A");
// Extract query stats from per_session_data if wsi is still valid
int db_queries = 0;
int db_rows = 0;
double query_rate = 0.0;
double row_rate = 0.0;
double avg_rows_per_query = 0.0;
if (wsi_pointer && strlen(wsi_pointer) > 2) { // Check for valid pointer string
// Parse wsi pointer from hex string
struct lws* wsi = NULL;
if (sscanf(wsi_pointer, "%p", (void**)&wsi) == 1 && wsi != NULL) {
// Get per_session_data from wsi
struct per_session_data* pss = (struct per_session_data*)lws_wsi_user(wsi);
if (pss) {
db_queries = pss->db_queries_executed;
db_rows = pss->db_rows_returned;
// Calculate rates (per minute)
time_t connection_duration = current_time - pss->query_tracking_start;
if (connection_duration > 0) {
double minutes = connection_duration / 60.0;
query_rate = db_queries / minutes;
row_rate = db_rows / minutes;
}
// Calculate average rows per query
if (db_queries > 0) {
avg_rows_per_query = (double)db_rows / (double)db_queries;
}
}
}
}
// Add query stats to subscription object
cJSON_AddNumberToObject(sub_obj, "db_queries_executed", db_queries);
cJSON_AddNumberToObject(sub_obj, "db_rows_returned", db_rows);
cJSON_AddNumberToObject(sub_obj, "query_rate_per_min", query_rate);
cJSON_AddNumberToObject(sub_obj, "row_rate_per_min", row_rate);
cJSON_AddNumberToObject(sub_obj, "avg_rows_per_query", avg_rows_per_query);
// Parse and add filter JSON if available
if (filter_json) {
cJSON* filters = cJSON_Parse(filter_json);
@@ -311,8 +373,15 @@ cJSON* query_subscription_details(void) {
cJSON_AddItemToObject(subscriptions_data, "data", data);
// Stop timing and log
clock_gettime(CLOCK_MONOTONIC, &query_end);
long elapsed_us = (query_end.tv_sec - query_start.tv_sec) * 1000000L +
(query_end.tv_nsec - query_start.tv_nsec) / 1000L;
log_query_execution("MONITOR", "subscription_details", NULL, sql, elapsed_us, row_count);
// DEBUG: Log final summary
DEBUG_LOG("Total subscriptions found: %d", cJSON_GetArraySize(subscriptions_array));
DEBUG_LOG("Total subscriptions found: %d", row_count);
DEBUG_LOG("=== END SUBSCRIPTION_DETAILS QUERY DEBUG ===");
return subscriptions_data;
@@ -459,10 +528,15 @@ int generate_monitoring_event_for_type(const char* d_tag_value, cJSON* (*query_f
// Monitoring hook called when an event is stored
void monitoring_on_event_stored(void) {
// Check throttling first (cheapest check)
// Check if monitoring is disabled (throttle = 0)
int throttle_seconds = get_monitoring_throttle_seconds();
if (throttle_seconds == 0) {
return; // Monitoring disabled
}
// Check throttling
static time_t last_monitoring_time = 0;
time_t current_time = time(NULL);
int throttle_seconds = get_monitoring_throttle_seconds();
if (current_time - last_monitoring_time < throttle_seconds) {
return;
@@ -481,10 +555,15 @@ void monitoring_on_event_stored(void) {
// Monitoring hook called when subscriptions change (create/close)
void monitoring_on_subscription_change(void) {
// Check throttling first (cheapest check)
// Check if monitoring is disabled (throttle = 0)
int throttle_seconds = get_monitoring_throttle_seconds();
if (throttle_seconds == 0) {
return; // Monitoring disabled
}
// Check throttling
static time_t last_monitoring_time = 0;
time_t current_time = time(NULL);
int throttle_seconds = get_monitoring_throttle_seconds();
if (current_time - last_monitoring_time < throttle_seconds) {
return;
@@ -2721,8 +2800,8 @@ int handle_monitoring_command(cJSON* event, const char* command, char* error_mes
char* endptr;
long throttle_seconds = strtol(arg, &endptr, 10);
if (*endptr != '\0' || throttle_seconds < 1 || throttle_seconds > 3600) {
char* response_content = "❌ Invalid throttle value\n\nThrottle must be between 1 and 3600 seconds.";
if (*endptr != '\0' || throttle_seconds < 0 || throttle_seconds > 3600) {
char* response_content = "❌ Invalid throttle value\n\nThrottle must be between 0 and 3600 seconds (0 = disabled).";
return send_admin_response(sender_pubkey, response_content, request_id, error_message, error_size, wsi);
}

File diff suppressed because one or more lines are too long

View File

@@ -229,6 +229,65 @@ void send_notice_message(struct lws* wsi, struct per_session_data* pss, const ch
}
/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////
// DATABASE QUERY LOGGING
/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////
/**
* Log database query execution with timing and context
 * Only logs at debug level 3 (INFO) or higher
* Warns if query takes >10ms (slow query)
*
* @param query_type Type of query (REQ, COUNT, INSERT, CONFIG, etc.)
* @param sub_id Subscription ID (NULL if not applicable)
* @param client_ip Client IP address (NULL if not applicable)
* @param sql SQL query text
* @param elapsed_us Execution time in microseconds
* @param rows_returned Number of rows returned or affected
*/
void log_query_execution(const char* query_type, const char* sub_id,
const char* client_ip, const char* sql,
long elapsed_us, int rows_returned) {
// Only log at debug level 3 (INFO) or higher
if (g_debug_level < DEBUG_LEVEL_INFO) {
return;
}
// Truncate SQL if too long (keep first 500 chars)
char sql_truncated[512];
if (strlen(sql) > 500) {
snprintf(sql_truncated, sizeof(sql_truncated), "%.497s...", sql);
} else {
snprintf(sql_truncated, sizeof(sql_truncated), "%s", sql);
}
// Get timestamp
time_t now = time(NULL);
struct tm* tm_info = localtime(&now);
char timestamp[32];
strftime(timestamp, sizeof(timestamp), "%Y-%m-%d %H:%M:%S", tm_info);
// Log query with all context (directly to stderr, not through DEBUG_LOG)
fprintf(stderr, "[%s] [QUERY] type=%s sub=%s ip=%s time=%ldus rows=%d sql=%s\n",
timestamp,
query_type,
sub_id ? sub_id : "N/A",
client_ip ? client_ip : "N/A",
elapsed_us,
rows_returned,
sql_truncated);
// Warn if query is slow (>10ms = 10000us)
if (elapsed_us > 10000) {
fprintf(stderr, "[%s] [SLOW_QUERY] %ldms: %s\n",
timestamp, elapsed_us / 1000, sql_truncated);
}
fflush(stderr);
}
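The REQ, COUNT, and monitoring call sites added in this compare all wrap their SQLite work in the same timing pattern around this function. Abridged from the hunks below (stmt, pss, sub_id, and sql are locals of the enclosing handler, so this fragment is not standalone):

    struct timespec query_start, query_end;
    clock_gettime(CLOCK_MONOTONIC, &query_start);
    if (pss) {
        pss->db_queries_executed++;      /* one query charged to this connection */
    }
    int row_count = 0;
    while (sqlite3_step(stmt) == SQLITE_ROW) {
        row_count++;
        if (pss) {
            pss->db_rows_returned++;     /* per-row accounting for abuse detection */
        }
        /* ... build and send the event ... */
    }
    sqlite3_finalize(stmt);
    clock_gettime(CLOCK_MONOTONIC, &query_end);
    long elapsed_us = (query_end.tv_sec - query_start.tv_sec) * 1000000L +
                      (query_end.tv_nsec - query_start.tv_nsec) / 1000L;
    log_query_execution("REQ", sub_id, pss ? pss->client_ip : NULL,
                        sql, elapsed_us, row_count);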
/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////
// DATABASE FUNCTIONS
@@ -370,12 +429,17 @@ int init_database(const char* database_path_override) {
if (!db_version || strcmp(db_version, "5") == 0) {
needs_migration = 1;
} else if (strcmp(db_version, "6") == 0) {
// Database is already at current schema version v6
// Database is at schema version v6 (compatible)
} else if (strcmp(db_version, "7") == 0) {
// Database is at schema version v7 (compatible)
} else if (strcmp(db_version, "8") == 0) {
// Database is at schema version v8 (compatible)
} else if (strcmp(db_version, EMBEDDED_SCHEMA_VERSION) == 0) {
// Database is at current schema version
} else {
char warning_msg[256];
snprintf(warning_msg, sizeof(warning_msg), "Unknown database schema version: %s", db_version);
snprintf(warning_msg, sizeof(warning_msg), "Unknown database schema version: %s (expected %s)",
db_version, EMBEDDED_SCHEMA_VERSION);
DEBUG_WARN(warning_msg);
}
} else {
@@ -1192,6 +1256,10 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
snprintf(sql_ptr, remaining, " LIMIT 500");
}
// Start query timing
struct timespec query_start, query_end;
clock_gettime(CLOCK_MONOTONIC, &query_start);
// Execute query and send events
sqlite3_stmt* stmt;
int rc = sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL);
@@ -1199,9 +1267,30 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
char error_msg[256];
snprintf(error_msg, sizeof(error_msg), "Failed to prepare subscription query: %s", sqlite3_errmsg(g_db));
DEBUG_ERROR(error_msg);
// Log the failed query so we can see what SQL was generated
if (g_debug_level >= DEBUG_LEVEL_DEBUG) {
time_t now = time(NULL);
struct tm* tm_info = localtime(&now);
char timestamp[32];
strftime(timestamp, sizeof(timestamp), "%Y-%m-%d %H:%M:%S", tm_info);
fprintf(stderr, "[%s] [QUERY_FAILED] type=REQ sub=%s ip=%s error=%s sql=%s\n",
timestamp,
sub_id,
pss ? pss->client_ip : "N/A",
sqlite3_errmsg(g_db),
sql);
fflush(stderr);
}
continue;
}
// Track query execution for abuse detection
if (pss) {
pss->db_queries_executed++;
}
// Bind parameters
for (int i = 0; i < bind_param_count; i++) {
sqlite3_bind_text(stmt, i + 1, bind_params[i], -1, SQLITE_TRANSIENT);
@@ -1211,6 +1300,11 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
while (sqlite3_step(stmt) == SQLITE_ROW) {
row_count++;
// Track rows returned for abuse detection
if (pss) {
pss->db_rows_returned++;
}
// Build event JSON
cJSON* event = cJSON_CreateObject();
cJSON_AddStringToObject(event, "id", (char*)sqlite3_column_text(stmt, 0));
@@ -1265,6 +1359,14 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
}
sqlite3_finalize(stmt);
// Stop query timing and log
clock_gettime(CLOCK_MONOTONIC, &query_end);
long elapsed_us = (query_end.tv_sec - query_start.tv_sec) * 1000000L +
(query_end.tv_nsec - query_start.tv_nsec) / 1000L;
log_query_execution("REQ", sub_id, pss ? pss->client_ip : NULL,
sql, elapsed_us, row_count);
}
// Cleanup bind params
@@ -1563,6 +1665,10 @@ int main(int argc, char* argv[]) {
signal(SIGINT, signal_handler);
signal(SIGTERM, signal_handler);
// Print version at startup (always, regardless of debug level)
fprintf(stderr, "[RELAY_VERSION] C Nostr Relay Server %s\n", CRELAY_VERSION);
fflush(stderr);
printf(BLUE BOLD "=== C Nostr Relay Server ===" RESET "\n");

View File

@@ -13,8 +13,8 @@
// Using CRELAY_ prefix to avoid conflicts with nostr_core_lib VERSION macros
#define CRELAY_VERSION_MAJOR 1
#define CRELAY_VERSION_MINOR 1
#define CRELAY_VERSION_PATCH 6
#define CRELAY_VERSION "v1.1.6"
#define CRELAY_VERSION_PATCH 8
#define CRELAY_VERSION "v1.1.8"
// Relay metadata (authoritative source for NIP-11 information)
#define RELAY_NAME "C-Relay"

View File

@@ -86,6 +86,11 @@ int is_event_expired(cJSON* event, time_t current_time);
int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, struct per_session_data *pss);
int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, struct per_session_data *pss);
// Forward declaration for query logging (defined in main.c)
extern void log_query_execution(const char* query_type, const char* sub_id,
const char* client_ip, const char* sql,
long elapsed_us, int rows_returned);
// Forward declarations for rate limiting
int is_client_rate_limited_for_malformed_requests(struct per_session_data *pss);
void record_malformed_request(struct per_session_data *pss);
@@ -391,6 +396,11 @@ static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reaso
memset(pss, 0, sizeof(*pss));
pthread_mutex_init(&pss->session_lock, NULL);
// Initialize database query tracking
pss->db_queries_executed = 0;
pss->db_rows_returned = 0;
pss->query_tracking_start = time(NULL);
// Get real client IP address
char client_ip[CLIENT_IP_MAX_LENGTH];
memset(client_ip, 0, sizeof(client_ip));
@@ -2429,7 +2439,7 @@ int process_dm_stats_command(cJSON* dm_event, char* error_message, size_t error_
// Handle NIP-45 COUNT message
int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, struct per_session_data *pss) {
(void)pss; // Suppress unused parameter warning
// pss is now used for query tracking; unused-parameter suppression no longer needed
if (!cJSON_IsArray(filters)) {
DEBUG_ERROR("COUNT filters is not an array");
@@ -2687,6 +2697,10 @@ int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, st
}
// Execute count query
// Start query timing
struct timespec query_start, query_end;
clock_gettime(CLOCK_MONOTONIC, &query_start);
// Execute count query
sqlite3_stmt* stmt;
@@ -2711,6 +2725,15 @@ int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, st
// Filter count calculated
sqlite3_finalize(stmt);
// Stop query timing and log
clock_gettime(CLOCK_MONOTONIC, &query_end);
long elapsed_us = (query_end.tv_sec - query_start.tv_sec) * 1000000L +
(query_end.tv_nsec - query_start.tv_nsec) / 1000L;
log_query_execution("COUNT", sub_id, pss ? pss->client_ip : NULL,
sql, elapsed_us, 1); // COUNT always returns 1 row
total_count += filter_count;
}

View File

@@ -79,6 +79,11 @@ struct per_session_data {
size_t reassembly_size; // Current size of accumulated data
size_t reassembly_capacity; // Allocated capacity of reassembly buffer
int reassembly_active; // Flag: 1 if currently reassembling a message
// Database query tracking for abuse detection and monitoring
int db_queries_executed; // Total SELECT queries executed by this connection
int db_rows_returned; // Total rows returned across all queries
time_t query_tracking_start; // When connection was established (for rate calculation)
};
// NIP-11 HTTP session data structure for managing buffer lifetime