Compare commits

9 Commits:

- 18a7deec54
- c794370a3f
- 2ed4b96058
- c0051b22be
- 4cc2d2376e
- 30dc4bf67d
- a1928cc5d7
- 7bf0757b1f
- 11b0a88cdd
```diff
@@ -107,13 +107,13 @@ COPY Makefile /build/Makefile
 # Disable fortification to avoid __*_chk symbols that don't exist in MUSL
 # Use conditional compilation flags based on DEBUG_BUILD argument
 RUN if [ "$DEBUG_BUILD" = "true" ]; then \
-        CFLAGS="-g -O0 -DDEBUG"; \
-        STRIP_CMD=""; \
-        echo "Building with DEBUG symbols enabled"; \
+        CFLAGS="-g -O2 -DDEBUG"; \
+        STRIP_CMD="echo 'Keeping debug symbols'"; \
+        echo "Building with DEBUG symbols enabled (optimized with -O2)"; \
     else \
         CFLAGS="-O2"; \
         STRIP_CMD="strip /build/c_relay_static"; \
-        echo "Building optimized production binary"; \
+        echo "Building optimized production binary (symbols stripped)"; \
     fi && \
     gcc -static $CFLAGS -Wall -Wextra -std=c99 \
         -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=0 \
```
Real-Time Traffic Monitoring Commands.md (new file, 174 lines)

@@ -0,0 +1,174 @@
# Real-Time Traffic Monitoring Commands (Direct Server Use)

Copy and paste these commands directly on your server.

## Quick Status Checks

### See IPs visiting in the last few minutes:
```bash
sudo tail -500 /var/log/nginx/access.log | awk '{print $1}' | sort | uniq -c | sort -rn | head -20
```

### See what status codes a specific IP is getting:
```bash
sudo tail -500 /var/log/nginx/access.log | awk '{print $1, $9}' | grep '216.73.216.38'
```

### Count status codes (200 vs 403):
```bash
sudo tail -500 /var/log/nginx/access.log | awk '{print $9}' | sort | uniq -c
```
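
These field positions assume nginx's default combined log format, where `$1` is the client IP and `$9` the HTTP status code. If you use a custom `log_format`, verify the positions first, for example:

```bash
# Print the last log line split into numbered awk fields to confirm which field holds what
sudo tail -1 /var/log/nginx/access.log | awk '{for (i = 1; i <= NF; i++) printf "%2d = %s\n", i, $i}'
```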

## Real-Time Monitoring

### Watch live traffic (updates every 2 seconds):
```bash
watch -n 2 'sudo tail -200 /var/log/nginx/access.log | awk "{print \$1}" | sort | uniq -c | sort -rn | head -15'
```

### See live log entries as they happen:
```bash
sudo tail -f /var/log/nginx/access.log
```

### Live GoAccess dashboard:
```bash
sudo tail -f /var/log/nginx/access.log | goaccess -
```

## Active Connections

### See who's connected RIGHT NOW:
```bash
sudo netstat -tn | grep ':443' | awk '{print $5}' | cut -d: -f1 | sort | uniq -c | sort -rn
```

### Alternative (using ss command):
```bash
sudo ss -tn | grep ':443' | awk '{print $5}' | cut -d: -f1 | sort | uniq -c | sort -rn
```

## Detailed Analysis

### Last 100 requests with timestamps:
```bash
sudo tail -100 /var/log/nginx/access.log | awk '{print $4, $1}' | sed 's/\[//'
```

### See what blocked IPs are trying to access:
```bash
sudo tail -500 /var/log/nginx/access.log | grep '216.73.216.38' | awk '{print $7}' | head -10
```

### Show all 403 (blocked) requests:
```bash
sudo tail -500 /var/log/nginx/access.log | awk '$9==403 {print $1}' | sort | uniq -c | sort -rn
```

### Show all successful (200) requests:
```bash
sudo tail -500 /var/log/nginx/access.log | awk '$9==200 {print $1}' | sort | uniq -c | sort -rn | head -10
```

## Comprehensive Monitoring Script

### Create a monitoring script:
```bash
cat > /tmp/monitor-traffic.sh << 'EOF'
#!/bin/bash
echo "=== Traffic in last 5 minutes ==="
echo "Time: $(date)"
echo ""
echo "Top IPs:"
sudo tail -1000 /var/log/nginx/access.log | awk '{print $1}' | sort | uniq -c | sort -rn | head -10
echo ""
echo "Blocked IPs (403 errors):"
sudo tail -1000 /var/log/nginx/access.log | awk '$9==403 {print $1}' | sort | uniq -c | sort -rn
echo ""
echo "Successful requests (200):"
sudo tail -1000 /var/log/nginx/access.log | awk '$9==200 {print $1}' | sort | uniq -c | sort -rn | head -5
echo ""
echo "Status Code Summary:"
sudo tail -1000 /var/log/nginx/access.log | awk '{print $9}' | sort | uniq -c | sort -rn
EOF
chmod +x /tmp/monitor-traffic.sh
```

### Run the monitoring script:
```bash
/tmp/monitor-traffic.sh
```
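
To take the same snapshot on a schedule instead of on demand, a cron entry works; a sketch (the output path is an assumption — adjust as needed, and schedule it from root's crontab since the script uses sudo-privileged commands):

```bash
# Append a snapshot every 5 minutes to a rolling log (hypothetical path)
( sudo crontab -l 2>/dev/null; echo '*/5 * * * * /tmp/monitor-traffic.sh >> /var/log/traffic-monitor.log 2>&1' ) | sudo crontab -
```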

## Auto-Refreshing Dashboard

### Live dashboard (refreshes every 5 seconds):
```bash
watch -n 5 'echo "=== Last 5 minutes ==="
date
echo ""
echo "Top IPs:"
sudo tail -1000 /var/log/nginx/access.log | awk "{print \$1}" | sort | uniq -c | sort -rn | head -10
echo ""
echo "Status Codes:"
sudo tail -1000 /var/log/nginx/access.log | awk "{print \$9}" | sort | uniq -c | sort -rn'
```

Press `Ctrl+C` to exit.

## GoAccess HTML Report (Live Updating)

### Generate live HTML report:
```bash
sudo goaccess /var/log/nginx/access.log -o /var/www/html/live-stats.html --real-time-html --daemonize
```

Then visit: https://git.laantungir.net/live-stats.html

### Stop the live report:
```bash
sudo pkill -f "goaccess.*live-stats"
```

## Filter by Time

### Get the timestamp from 5 minutes ago:
```bash
date -d '5 minutes ago' '+%d/%b/%Y:%H:%M'
```

### Analyze only recent logs (replace the timestamp with the output above):
```bash
sudo awk '/01\/Feb\/2026:19:09/,0' /var/log/nginx/access.log | goaccess -
```
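
The two steps can also be combined so the timestamp never has to be pasted by hand; a minimal sketch, assuming the default nginx time format (`%d/%b/%Y:%H:%M`) and GNU date:

```bash
# Stream everything from ~5 minutes ago onward into goaccess.
# Caveat: if no request arrived during that exact minute, nothing matches.
ts=$(date -d '5 minutes ago' '+%d/%b/%Y:%H:%M')
sudo awk -v ts="$ts" 'index($0, ts) {found=1} found' /var/log/nginx/access.log | goaccess -
```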

## Check Gitea CPU

### Current CPU usage:
```bash
ps aux | grep gitea | grep -v grep
```

### Watch CPU in real-time:
```bash
watch -n 2 'ps aux | grep gitea | grep -v grep'
```

## Most Useful Command for Quick Check

This one-liner shows everything you need:

```bash
echo "=== Quick Status ===" && \
echo "Time: $(date)" && \
echo "" && \
echo "Top 10 IPs (last 1000 requests):" && \
sudo tail -1000 /var/log/nginx/access.log | awk '{print $1}' | sort | uniq -c | sort -rn | head -10 && \
echo "" && \
echo "Status Codes:" && \
sudo tail -1000 /var/log/nginx/access.log | awk '{print $9}' | sort | uniq -c && \
echo "" && \
echo "Gitea CPU:" && \
ps aux | grep gitea | grep -v grep
```

Copy any of these commands and run them directly on your server!
api/index.js (17 lines changed)

```diff
@@ -4324,6 +4324,12 @@ function populateSubscriptionDetailsTable(subscriptionsData) {
     const oldestDuration = Math.max(...subscriptions.map(s => now - s.created_at));
     const oldestDurationStr = formatDuration(oldestDuration);
 
+    // Calculate total query stats for this connection
+    const totalQueries = subscriptions.reduce((sum, s) => sum + (s.db_queries_executed || 0), 0);
+    const totalRows = subscriptions.reduce((sum, s) => sum + (s.db_rows_returned || 0), 0);
+    const avgQueryRate = subscriptions.length > 0 ? (subscriptions[0].query_rate_per_min || 0) : 0;
+    const clientIp = subscriptions.length > 0 ? (subscriptions[0].client_ip || 'unknown') : 'unknown';
+
     // Create header row (summary)
     const headerRow = document.createElement('tr');
     headerRow.className = 'subscription-group-header';
@@ -4334,9 +4340,14 @@ function populateSubscriptionDetailsTable(subscriptionsData) {
     headerRow.innerHTML = `
         <td colspan="4" style="padding: 8px;">
             <span class="expand-icon" style="display: inline-block; width: 20px; transition: transform 0.2s;">▶</span>
-            <strong style="font-family: 'Courier New', monospace; font-size: 12px;">Websocket: ${wsiPointer}</strong>
-            <span style="color: #666; margin-left: 15px;">
-                Subscriptions: ${subCount} | Oldest: ${oldestDurationStr}
+            <strong style="font-family: 'Courier New', monospace; font-size: 12px;">IP: ${clientIp}</strong>
+            <span style="color: #666; margin-left: 10px; font-size: 11px;">
+                WS: ${wsiPointer} |
+                Subs: ${subCount} |
+                Queries: ${totalQueries.toLocaleString()} |
+                Rows: ${totalRows.toLocaleString()} |
+                Rate: ${avgQueryRate.toFixed(1)} q/min |
+                Duration: ${oldestDurationStr}
             </span>
         </td>
     `;
```
Binary file not shown.
```diff
@@ -188,17 +188,17 @@ update_version_in_header() {
         exit 1
     fi
 
-    # Update VERSION macro
-    sed -i "s/#define VERSION \".*\"/#define VERSION \"$new_version\"/" src/main.h
+    # Update CRELAY_VERSION macro
+    sed -i "s/#define CRELAY_VERSION \".*\"/#define CRELAY_VERSION \"$new_version\"/" src/main.h
 
-    # Update VERSION_MAJOR macro
-    sed -i "s/#define VERSION_MAJOR [0-9]\+/#define VERSION_MAJOR $major/" src/main.h
+    # Update CRELAY_VERSION_MAJOR macro
+    sed -i "s/#define CRELAY_VERSION_MAJOR [0-9]\+/#define CRELAY_VERSION_MAJOR $major/" src/main.h
 
-    # Update VERSION_MINOR macro
-    sed -i "s/#define VERSION_MINOR .*/#define VERSION_MINOR $minor/" src/main.h
+    # Update CRELAY_VERSION_MINOR macro
+    sed -i "s/#define CRELAY_VERSION_MINOR .*/#define CRELAY_VERSION_MINOR $minor/" src/main.h
 
-    # Update VERSION_PATCH macro
-    sed -i "s/#define VERSION_PATCH [0-9]\+/#define VERSION_PATCH $patch/" src/main.h
+    # Update CRELAY_VERSION_PATCH macro
+    sed -i "s/#define CRELAY_VERSION_PATCH [0-9]\+/#define CRELAY_VERSION_PATCH $patch/" src/main.h
 
     print_success "Updated version in src/main.h to $new_version"
 }
```
plans/event_json_storage_and_migration_plan.md (new file, 579 lines)

@@ -0,0 +1,579 @@
# Event JSON Storage & Database Migration Plan

**Goal:** Store the full event JSON in the database for ~2,500x faster retrieval + implement a proper database migration system

---

## Decision: Fresh Start vs Migration

### Option A: Fresh Start (Recommended for This Change)

**Pros:**
- ✅ Clean implementation (no migration complexity)
- ✅ Fast deployment (no data conversion)
- ✅ No risk of migration bugs
- ✅ Opportunity to fix any schema issues
- ✅ Smaller database (no legacy data)

**Cons:**
- ❌ Lose existing events
- ❌ Relay starts "empty"
- ❌ Historical data lost

**Recommendation:** **Fresh start for this change** because:
1. Your relay is still in the development/testing phase
2. The schema change is fundamental (affects every event)
3. Migration would require reconstructing JSON for every existing event (expensive)
4. You've been doing fresh starts anyway

### Option B: Implement Migration System

**Pros:**
- ✅ Preserve existing events
- ✅ No data loss
- ✅ Professional approach
- ✅ Reusable for future changes

**Cons:**
- ❌ Complex implementation
- ❌ Slow migration (reconstruct JSON for all events)
- ❌ Risk of bugs during migration
- ❌ Requires careful testing

**Recommendation:** **Implement a migration system for FUTURE changes**, but start fresh for this one.

---

## Proposed Schema Change

### New Schema (v11)

```sql
CREATE TABLE events (
    id TEXT PRIMARY KEY,
    pubkey TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    kind INTEGER NOT NULL,
    event_type TEXT NOT NULL CHECK (event_type IN ('regular', 'replaceable', 'ephemeral', 'addressable')),
    content TEXT NOT NULL,
    sig TEXT NOT NULL,
    tags JSON NOT NULL DEFAULT '[]',
    event_json TEXT NOT NULL, -- NEW: Full event as JSON string
    first_seen INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))
);

-- Keep all existing indexes (they query the columns, not event_json)
CREATE INDEX idx_events_pubkey ON events(pubkey);
CREATE INDEX idx_events_kind ON events(kind);
CREATE INDEX idx_events_created_at ON events(created_at DESC);
CREATE INDEX idx_events_kind_created_at ON events(kind, created_at DESC);
CREATE INDEX idx_events_pubkey_created_at ON events(pubkey, created_at DESC);
```

### Why Keep Both Columns AND event_json?

**Columns (id, pubkey, kind, etc.):**
- Used for **querying** (WHERE clauses, indexes)
- Fast filtering and sorting
- Required for SQL operations

**event_json:**
- Used for **retrieval** (SELECT results)
- Pre-serialized, ready to send
- Eliminates JSON reconstruction

**This is a common pattern** in high-performance systems (denormalization for read performance).
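
To make the split concrete, here is a hypothetical read-path query under this schema: the WHERE/ORDER BY run entirely against indexed columns, and the pre-serialized blob is returned untouched.

```sql
-- Indexes drive the filtering; event_json is returned as-is, ready to send
SELECT event_json
FROM events
WHERE kind = 1
  AND created_at > strftime('%s', 'now') - 3600
ORDER BY created_at DESC
LIMIT 10;
```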

---

## Implementation Plan

### Phase 1: Schema Update (v11)

**File:** `src/sql_schema.h`

```c
#define EMBEDDED_SCHEMA_VERSION "11"

// In schema SQL — event_json is the NEW column. (Note: a comment cannot
// follow the line-continuation backslash inside the string literal.)
"CREATE TABLE events (\n\
    id TEXT PRIMARY KEY,\n\
    pubkey TEXT NOT NULL,\n\
    created_at INTEGER NOT NULL,\n\
    kind INTEGER NOT NULL,\n\
    event_type TEXT NOT NULL,\n\
    content TEXT NOT NULL,\n\
    sig TEXT NOT NULL,\n\
    tags JSON NOT NULL DEFAULT '[]',\n\
    event_json TEXT NOT NULL,\n\
    first_seen INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))\n\
);"
```

### Phase 2: Update store_event() Function

**File:** `src/main.c` (lines 660-773)

**Current:**
```c
int store_event(cJSON* event) {
    // Extract fields
    cJSON* id = cJSON_GetObjectItem(event, "id");
    // ... extract other fields ...

    // INSERT with individual columns
    const char* sql = "INSERT INTO events (id, pubkey, ...) VALUES (?, ?, ...)";
}
```

**New:**
```c
int store_event(cJSON* event) {
    // Serialize event to JSON string ONCE
    char* event_json = cJSON_PrintUnformatted(event);
    if (!event_json) {
        return -1;
    }

    // Extract fields for indexed columns
    cJSON* id = cJSON_GetObjectItem(event, "id");
    // ... extract other fields ...

    // INSERT with columns + event_json
    const char* sql = "INSERT INTO events (id, pubkey, ..., event_json) VALUES (?, ?, ..., ?)";

    // ... bind parameters ...
    sqlite3_bind_text(stmt, 9, event_json, -1, SQLITE_TRANSIENT);

    // ... execute ...
    free(event_json);
}
```

### Phase 3: Update handle_req_message() Function

**File:** `src/main.c` (lines 1302-1361)

**Current:**
```c
while (sqlite3_step(stmt) == SQLITE_ROW) {
    // Build event JSON from 7 columns
    cJSON* event = cJSON_CreateObject();
    cJSON_AddStringToObject(event, "id", (char*)sqlite3_column_text(stmt, 0));
    // ... 6 more fields ...
    cJSON* tags = cJSON_Parse(tags_json); // Parse tags
    cJSON_AddItemToObject(event, "tags", tags);

    // Create EVENT message
    cJSON* event_msg = cJSON_CreateArray();
    cJSON_AddItemToArray(event_msg, cJSON_CreateString("EVENT"));
    cJSON_AddItemToArray(event_msg, cJSON_CreateString(sub_id));
    cJSON_AddItemToArray(event_msg, event);

    char* msg_str = cJSON_Print(event_msg);
    queue_message(wsi, pss, msg_str, msg_len, LWS_WRITE_TEXT);
}
```

**New:**
```c
// Update SQL to select event_json
const char* sql = "SELECT event_json FROM events WHERE ...";

while (sqlite3_step(stmt) == SQLITE_ROW) {
    const char* event_json = (char*)sqlite3_column_text(stmt, 0);

    // Build EVENT message with pre-serialized event
    // Format: ["EVENT","<sub_id>",<event_json>]
    size_t msg_len = 13 + strlen(sub_id) + strlen(event_json); // 13 = framing chars in ["EVENT","",]
    char* msg_str = malloc(msg_len + 1);
    snprintf(msg_str, msg_len + 1, "[\"EVENT\",\"%s\",%s]", sub_id, event_json);

    queue_message(wsi, pss, msg_str, strlen(msg_str), LWS_WRITE_TEXT);
    free(msg_str);
}
```

**Speedup:** 366 × (cJSON operations) eliminated!

---

## Database Migration System Design

### For Future Schema Changes

**File:** `src/migrations.c` (new file)

```c
typedef struct {
    int from_version;
    int to_version;
    const char* description;
    int (*migrate_func)(sqlite3* db);
} migration_t;

// Migration from v10 to v11: Add event_json column
int migrate_v10_to_v11(sqlite3* db) {
    // Step 1: Add column
    const char* add_column_sql =
        "ALTER TABLE events ADD COLUMN event_json TEXT";

    if (sqlite3_exec(db, add_column_sql, NULL, NULL, NULL) != SQLITE_OK) {
        return -1;
    }

    // Step 2: Populate event_json for existing events
    const char* select_sql =
        "SELECT id, pubkey, created_at, kind, content, sig, tags FROM events";

    sqlite3_stmt* stmt;
    if (sqlite3_prepare_v2(db, select_sql, -1, &stmt, NULL) != SQLITE_OK) {
        return -1;
    }

    while (sqlite3_step(stmt) == SQLITE_ROW) {
        // Reconstruct JSON
        cJSON* event = cJSON_CreateObject();
        cJSON_AddStringToObject(event, "id", (char*)sqlite3_column_text(stmt, 0));
        // ... add other fields ...

        char* event_json = cJSON_PrintUnformatted(event);

        // Update row (bind the id TRANSIENT: column_text pointers don't
        // outlive the next step on the outer statement)
        const char* update_sql = "UPDATE events SET event_json = ? WHERE id = ?";
        sqlite3_stmt* update_stmt;
        sqlite3_prepare_v2(db, update_sql, -1, &update_stmt, NULL);
        sqlite3_bind_text(update_stmt, 1, event_json, -1, SQLITE_TRANSIENT);
        sqlite3_bind_text(update_stmt, 2, (char*)sqlite3_column_text(stmt, 0), -1, SQLITE_TRANSIENT);
        sqlite3_step(update_stmt);
        sqlite3_finalize(update_stmt);

        free(event_json);
        cJSON_Delete(event);
    }

    sqlite3_finalize(stmt);

    // Step 3: Make column NOT NULL
    // (SQLite doesn't support ALTER COLUMN, so we'd need to recreate the table)

    return 0;
}

// Migration registry
static migration_t migrations[] = {
    {10, 11, "Add event_json column for fast retrieval", migrate_v10_to_v11},
    // Future migrations go here
};

int run_migrations(sqlite3* db, int current_version, int target_version) {
    for (size_t i = 0; i < sizeof(migrations) / sizeof(migration_t); i++) {
        if (migrations[i].from_version >= current_version &&
            migrations[i].to_version <= target_version) {

            printf("Running migration: %s\n", migrations[i].description);

            if (migrations[i].migrate_func(db) != 0) {
                fprintf(stderr, "Migration failed: %s\n", migrations[i].description);
                return -1;
            }

            // Update schema version
            char update_version_sql[256];
            snprintf(update_version_sql, sizeof(update_version_sql),
                     "PRAGMA user_version = %d", migrations[i].to_version);
            sqlite3_exec(db, update_version_sql, NULL, NULL, NULL);
        }
    }
    return 0;
}
```
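
A possible call site at startup, to show how the registry is meant to be driven (a sketch: `get_schema_version()` is a hypothetical helper that reads `PRAGMA user_version`):

```c
// On startup, after opening the database:
int current = get_schema_version(db);        // hypothetical: reads PRAGMA user_version
int target  = atoi(EMBEDDED_SCHEMA_VERSION); // "11" -> 11

if (current < target && run_migrations(db, current, target) != 0) {
    fprintf(stderr, "Database migration failed; refusing to start\n");
    exit(1);
}
```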

---

## Recommendation: Hybrid Approach

### For This Change (v10 → v11): Fresh Start

**Rationale:**
1. Your relay is still in development
2. Migration would be slow (reconstruct JSON for all events)
3. You've been doing fresh starts anyway
4. Clean slate for performance testing

**Steps:**
1. Update schema to v11 with event_json column
2. Update store_event() to populate event_json
3. Update handle_req_message() to use event_json
4. Deploy with fresh database
5. Test performance improvement

### For Future Changes: Use Migration System

**Rationale:**
1. Once the relay is in production, data preservation matters
2. The migration system is reusable
3. Professional approach for a production relay

**Steps:**
1. Create `src/migrations.c` and `src/migrations.h`
2. Implement migration framework
3. Add migration functions for each schema change
4. Test migrations thoroughly before deployment

---

## Migration System Features

### Core Features

1. **Version Detection**
   - Read current schema version from database
   - Compare with embedded schema version
   - Determine which migrations to run

2. **Migration Chain**
   - Run migrations in sequence (v8 → v9 → v10 → v11)
   - Skip already-applied migrations
   - Stop on first failure

3. **Backup Before Migration**
   - Automatic database backup before migration (see the sketch at the end of this section)
   - Rollback capability if migration fails
   - Backup retention policy

4. **Progress Reporting**
   - Log migration progress
   - Show estimated time remaining
   - Report success/failure

### Safety Features

1. **Transaction Wrapping**
   ```c
   sqlite3_exec(db, "BEGIN TRANSACTION", NULL, NULL, NULL);
   int result = migrate_v10_to_v11(db);
   if (result == 0) {
       sqlite3_exec(db, "COMMIT", NULL, NULL, NULL);
   } else {
       sqlite3_exec(db, "ROLLBACK", NULL, NULL, NULL);
   }
   ```

2. **Validation After Migration**
   - Verify row counts match
   - Check data integrity
   - Validate indexes created

3. **Dry-Run Mode**
   - Test migration without committing
   - Report what would be changed
   - Estimate migration time
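
For the backup step listed above, SQLite's online backup API is the natural fit; a minimal sketch (assumes the destination path is writable and not open elsewhere):

```c
#include <sqlite3.h>

// Copy the live database to backup_path using SQLite's online backup API
int backup_database_to(sqlite3* src, const char* backup_path) {
    sqlite3* dest = NULL;
    if (sqlite3_open(backup_path, &dest) != SQLITE_OK) {
        return -1;
    }

    sqlite3_backup* bak = sqlite3_backup_init(dest, "main", src, "main");
    if (!bak) {
        sqlite3_close(dest);
        return -1;
    }

    sqlite3_backup_step(bak, -1);   // -1 = copy all remaining pages
    sqlite3_backup_finish(bak);

    int ok = (sqlite3_errcode(dest) == SQLITE_OK);
    sqlite3_close(dest);
    return ok ? 0 : -1;
}
```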

---

## Implementation Timeline

### Immediate (Today): Fresh Start with event_json

**Changes:**
1. Update schema to v11 (add event_json column)
2. Update store_event() to populate event_json
3. Update handle_req_message() to use event_json
4. Deploy with fresh database

**Effort:** 4 hours
**Impact:** 2,500x faster event retrieval

### This Week: Build Migration Framework

**Changes:**
1. Create src/migrations.c and src/migrations.h
2. Implement migration runner
3. Add backup/rollback capability
4. Add progress reporting

**Effort:** 1-2 days
**Impact:** Reusable for all future schema changes

### Future: Add Migrations as Needed

**For each schema change:**
1. Write migration function
2. Add to migrations array
3. Test thoroughly
4. Deploy with automatic migration

---

## Code Structure

### File Organization

```
src/
├── migrations.c     # NEW: Migration system
├── migrations.h     # NEW: Migration API
├── sql_schema.h     # Schema definition (v11)
├── main.c           # Updated store_event() and handle_req_message()
└── ...
```

### Migration API

```c
// migrations.h
int init_migration_system(sqlite3* db);
int run_pending_migrations(sqlite3* db);
int backup_database(const char* db_path, char* backup_path, size_t backup_path_size);
int rollback_migration(sqlite3* db, const char* backup_path);
```

---

## Testing Strategy

### For Fresh Start (v11)

1. **Local testing:**
   - Build with new schema
   - Post test events
   - Query events and measure performance
   - Verify event_json is populated correctly

2. **Performance testing:**
   - Query 366 events (see the timing sketch below)
   - Measure time (should be <10ms instead of 18s)
   - Check CPU usage (should be <20%)

3. **Production deployment:**
   - Stop relay
   - Delete old database
   - Start relay with v11 schema
   - Monitor performance

### For Migration System (Future)

1. **Unit tests:**
   - Test each migration function
   - Test rollback capability
   - Test error handling

2. **Integration tests:**
   - Create database with old schema
   - Run migration
   - Verify data integrity
   - Test rollback

3. **Performance tests:**
   - Measure migration time for large databases
   - Test with 10K, 100K, 1M events
   - Optimize slow migrations
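
For the 366-event performance test above, one quick way to time the round trip from a shell (a sketch; assumes `websocat` is installed and the relay listens on ws://127.0.0.1:8080):

```bash
# Time from sending the REQ until the relay's EOSE arrives
time ( echo '["REQ","perf-test",{"limit":366}]' \
  | websocat ws://127.0.0.1:8080 \
  | grep -m1 '"EOSE"' > /dev/null )
```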

---

## Migration Complexity Analysis

### For v10 → v11 Migration

**If we were to migrate existing data:**

```sql
-- Step 1: Add column (fast)
ALTER TABLE events ADD COLUMN event_json TEXT;

-- Step 2: Populate event_json (SLOW!)
-- For each of N events:
--   1. SELECT 7 columns
--   2. Reconstruct JSON (cJSON operations)
--   3. Serialize to string (cJSON_Print)
--   4. UPDATE event_json column
--   5. Free memory

-- Estimated time:
--   - 1,000 events: ~10 seconds
--   - 10,000 events: ~100 seconds
--   - 100,000 events: ~1,000 seconds (16 minutes)
```

**Conclusion:** Migration is expensive for this change. Fresh start is better.

---

## Future Migration Examples

### Easy Migrations (Fast)

**Adding an index:**
```c
int migrate_add_index(sqlite3* db) {
    return sqlite3_exec(db,
        "CREATE INDEX idx_new ON events(new_column)",
        NULL, NULL, NULL);
}
```

**Adding a column with a default:**
```c
int migrate_add_column(sqlite3* db) {
    return sqlite3_exec(db,
        "ALTER TABLE events ADD COLUMN new_col TEXT DEFAULT ''",
        NULL, NULL, NULL);
}
```

### Hard Migrations (Slow)

**Changing a column type (see the rebuild sketch below):**
- Requires table recreation
- Copy all data
- Recreate indexes
- Can take minutes for large databases

**Populating computed columns:**
- Requires row-by-row processing
- Can take minutes for large databases
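
Since SQLite has no `ALTER COLUMN`, both hard cases reduce to the standard table-rebuild pattern; an illustrative sketch against the v11 `events` table:

```sql
BEGIN TRANSACTION;

-- 1. Create the replacement table with the changed definition
CREATE TABLE events_new (
    id TEXT PRIMARY KEY,
    pubkey TEXT NOT NULL,
    created_at INTEGER NOT NULL,
    kind INTEGER NOT NULL,
    event_type TEXT NOT NULL,
    content TEXT NOT NULL,
    sig TEXT NOT NULL,
    tags JSON NOT NULL DEFAULT '[]',
    event_json TEXT NOT NULL,
    first_seen INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))
);

-- 2. Copy all rows, 3. swap the tables, 4. recreate the indexes
INSERT INTO events_new
    SELECT id, pubkey, created_at, kind, event_type, content, sig, tags, event_json, first_seen
    FROM events;
DROP TABLE events;
ALTER TABLE events_new RENAME TO events;
CREATE INDEX idx_events_kind_created_at ON events(kind, created_at DESC);

COMMIT;
```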

---

## Recommendation Summary

### For This Change (event_json)

**Do:** Fresh start with v11 schema
- Fast deployment
- Clean implementation
- Immediate performance benefit
- No migration complexity

**Don't:** Migrate existing data
- Too slow (reconstruct JSON for all events)
- Too complex (first migration)
- Not worth it (relay still in development)

### For Future Changes

**Do:** Implement migration system
- Professional approach
- Data preservation
- Reusable framework
- Required for production relay

**Timeline:**
- **Today:** Deploy v11 with fresh start
- **This week:** Build migration framework
- **Future:** Use migrations for all schema changes

---

## Next Steps

1. ✅ Update schema to v11 (add event_json column)
2. ✅ Update store_event() to populate event_json
3. ✅ Update handle_req_message() to use event_json
4. ✅ Test locally with 366-event query
5. ✅ Deploy to production with fresh database
6. ✅ Measure performance improvement
7. ⏳ Build migration system for future use

**Expected result:** 366-event retrieval time drops from 18s to <10ms (2,500x speedup)
query_analysis_report.md (new file, 342 lines)

@@ -0,0 +1,342 @@
# Database Query Performance Analysis Report

**Analysis Date:** 2026-02-02
**Log Duration:** ~6 minutes (15:24:50 - 15:30:58)
**Total Queries:** 366 queries
**Data Source:** serverlog.txt

---

## Executive Summary

The relay is experiencing moderate performance issues, with an average query time of **10.4ms** and a maximum query time of **672ms**. The primary bottlenecks are:

1. **Tag-based searches using `json_each()`** - 53% of all queries (194/366)
2. **Monitoring system queries** - taking 540-550ms each
3. **Multiple pubkey lookups** - kind 10002 queries with 15-50 pubkeys

---

## Query Performance Metrics

### Overall Statistics
- **Total Queries:** 366
- **Average Query Time:** 10,440 μs (10.4 ms)
- **Minimum Query Time:** 14 μs
- **Maximum Query Time:** 672,846 μs (672.8 ms)
- **Slow Queries (>10ms):** 8 queries (2.2%)

### Query Type Breakdown
| Type | Count | Percentage |
|------|-------|------------|
| REQ | 359 | 98.1% |
| MONITOR | 7 | 1.9% |

---

## Critical Performance Issues

### 1. **SLOWEST QUERY: 672ms Tag Search (IP: 192.42.116.178)**

```sql
SELECT id, pubkey, created_at, kind, content, sig, tags
FROM events
WHERE 1=1
  AND (kind < 20000 OR kind >= 30000)
  AND kind IN (5,6300,7000,2004,1622)
  AND EXISTS (
      SELECT 1 FROM json_each(json(tags))
      WHERE json_extract(value, '$[0]') = ?
        AND json_extract(value, '$[1]') IN (?)
  )
ORDER BY created_at DESC
LIMIT 100
```

**Problem:** Full table scan with JSON parsing for every row
**Impact:** 672ms for 0 results (wasted computation)
**Root Cause:** No index on tag values, so every event must be scanned
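
One way to confirm the full-scan diagnosis on the server is to ask SQLite for its plan (a sketch for the `sqlite3` CLI; the `'e'` tag name is an arbitrary example):

```sql
EXPLAIN QUERY PLAN
SELECT id FROM events
WHERE kind IN (5,6300,7000,2004,1622)
  AND EXISTS (
      SELECT 1 FROM json_each(json(tags))
      WHERE json_extract(value, '$[0]') = 'e'
  );
-- A top-level "SCAN events" (rather than "SEARCH ... USING INDEX") confirms
-- that every row is visited and its tags JSON parsed.
```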

---

### 2. **Monitoring System Queries: 540-550ms Each**

```sql
SELECT * FROM active_subscriptions_log
ORDER BY created_at DESC
```

**Occurrences:** 4 queries in 6 minutes
**Average Time:** 545ms
**Rows Returned:** 20-52 rows
**Problem:** Extremely slow for such small result sets
**Root Cause:** Likely a missing index on the `created_at` column

---

### 3. **Tag-Based Searches (json_each) - 53% of All Queries**

- **Total:** 194 queries (53% of all queries)
- **Pattern:** `EXISTS (SELECT 1 FROM json_each(json(tags)) WHERE ...)`
- **Most Common:** Kind 1984 (105 queries), Kind 1111 (47 queries)
- **Problem:** Every tag search requires full JSON parsing

**Example Pattern:**
```sql
WHERE kind IN (1984)
  AND pubkey IN (?)
  AND EXISTS (
      SELECT 1 FROM json_each(json(tags))
      WHERE json_extract(value, '$[0]') = ?
        AND json_extract(value, '$[1]') IN (?,?,?,?,?,?,?,?,?,?,?)
  )
```

---

### 4. **Multiple Pubkey Lookups (Kind 10002)**

- **Total:** 64 queries for kind 10002
- **Average Time:** 2,500-3,300 μs (2.5-3.3ms)
- **Pattern:** Queries with 15-50 pubkeys in the `IN` clause
- **Problem:** Large `IN` clauses without proper indexing

**Example:**
```sql
WHERE kind IN (10002)
  AND pubkey IN (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
```

---

## Client Activity Analysis

### Top Query Sources (by IP)
| IP Address | Query Count | Percentage | Notes |
|------------|-------------|------------|-------|
| 45.84.107.222 | 101 | 27.6% | **Highest activity** |
| 23.234.109.54 | 69 | 18.9% | Second highest |
| 185.220.101.38 | 56 | 15.3% | Third highest |
| 192.42.116.178 | 51 | 13.9% | **Source of 672ms query** |
| 149.22.80.85 | 34 | 9.3% | |
| 174.138.53.241 | 24 | 6.6% | |
| Others | 31 | 8.5% | 6 other IPs |

**Observation:** The top 3 IPs account for 61.8% of all queries.

---

## Most Common Query Patterns

| Kind Filter | Query Count | Percentage |
|-------------|-------------|------------|
| kind IN (1984) | 105 | 28.7% |
| kind IN (10002) | 64 | 17.5% |
| kind IN (1111) | 47 | 12.8% |
| kind IN (0,2,3,10002) | 24 | 6.6% |
| kind IN (9735) | 23 | 6.3% |
| kind IN (0,30315,10002,10050) | 20 | 5.5% |
| Others | 83 | 22.7% |

---

## Optimization Recommendations

### Priority 1: Critical (Immediate Action Required)

#### 1.1 Add Index on `active_subscriptions_log.created_at`
**Impact:** Will reduce monitoring queries from 540ms to <10ms
**Effort:** Low
**SQL:**
```sql
CREATE INDEX IF NOT EXISTS idx_active_subscriptions_created_at
ON active_subscriptions_log(created_at DESC);
```

#### 1.2 Implement Tag Indexing System
**Impact:** Will reduce tag searches from 100-600ms to <10ms
**Effort:** High
**Options:**
- **Option A:** Create a separate `event_tags` table with indexes
  ```sql
  CREATE TABLE event_tags (
      event_id TEXT NOT NULL,
      tag_name TEXT NOT NULL,
      tag_value TEXT NOT NULL,
      FOREIGN KEY (event_id) REFERENCES events(id)
  );
  CREATE INDEX idx_event_tags_lookup ON event_tags(tag_name, tag_value);
  ```
- **Option B:** Use the SQLite JSON1 extension with generated columns (if available)
- **Option C:** Implement application-level tag caching

**Recommended:** Option A (most reliable and performant)
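
If Option A is adopted, the side table has to be kept in sync with `events`. Triggers are one low-touch way to do that (a sketch; assumes the JSON1 extension and that `tags` holds a JSON array of arrays):

```sql
-- Populate event_tags automatically on insert
CREATE TRIGGER IF NOT EXISTS events_ai_tags AFTER INSERT ON events
BEGIN
    INSERT INTO event_tags (event_id, tag_name, tag_value)
    SELECT NEW.id,
           json_extract(value, '$[0]'),
           json_extract(value, '$[1]')
    FROM json_each(NEW.tags)
    WHERE json_extract(value, '$[1]') IS NOT NULL;
END;

-- Deletes are covered by the ON DELETE CASCADE foreign key when
-- PRAGMA foreign_keys is ON; otherwise add a matching AFTER DELETE trigger.
```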

---

### Priority 2: High (Implement Within a Week)

#### 2.1 Optimize Multiple Pubkey Queries
**Current:** `pubkey IN (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)`
**Problem:** Large IN clauses are inefficient
**Solution:**
- Add a composite index: `CREATE INDEX idx_events_kind_pubkey ON events(kind, pubkey, created_at DESC);`
- Consider query rewriting for >10 pubkeys

#### 2.2 Add Query Result Caching
**Target Queries:**
- Kind 0 (profile) lookups - frequently repeated
- Kind 10002 (relay lists) - relatively static
- Kind 1984 (reports) - can be cached for 30-60 seconds

**Implementation:**
- Use an in-memory LRU cache (e.g., 1000 entries)
- Cache key: hash of SQL + parameters
- TTL: 30-60 seconds for most queries
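
After adding the composite index from 2.1, it's worth verifying that SQLite actually picks it up for the multi-pubkey pattern (hypothetical pubkey values):

```sql
EXPLAIN QUERY PLAN
SELECT id FROM events
WHERE kind = 10002
  AND pubkey IN ('pk1', 'pk2', 'pk3')
ORDER BY created_at DESC
LIMIT 100;
-- Expected: SEARCH events USING INDEX idx_events_kind_pubkey (kind=? AND pubkey=?)
```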

---

### Priority 3: Medium (Optimize Over Time)

#### 3.1 Disable or Throttle Monitoring Queries
**Current:** Monitoring queries run every ~60 seconds
**Impact:** Each query takes 540ms
**Options:**
- Increase the throttle to 300 seconds (5 minutes)
- Disable monitoring entirely if not actively used
- Optimize the `active_subscriptions_log` table structure

#### 3.2 Implement Query Complexity Limits
**Problem:** Some queries scan the entire table (e.g., the 672ms query returned 0 results)
**Solution:**
- Reject queries without time bounds (require `since` or `until`)
- Limit the number of kinds in a single query (max 10)
- Limit the number of pubkeys in a single query (max 20)

#### 3.3 Add Composite Indexes for Common Patterns
```sql
-- For kind + created_at queries (most common pattern)
CREATE INDEX idx_events_kind_created ON events(kind, created_at DESC);

-- For kind + pubkey + created_at queries
CREATE INDEX idx_events_kind_pubkey_created ON events(kind, pubkey, created_at DESC);
```

---

## Database Schema Recommendations

### Current Issues
1. **No tag indexing** - forces full table scans with JSON parsing
2. **Missing created_at indexes** - monitoring queries are extremely slow
3. **No composite indexes** - multi-condition queries are inefficient

### Recommended Schema Changes

```sql
-- 1. Add tag indexing table
CREATE TABLE IF NOT EXISTS event_tags (
    event_id TEXT NOT NULL,
    tag_name TEXT NOT NULL,
    tag_value TEXT NOT NULL,
    FOREIGN KEY (event_id) REFERENCES events(id) ON DELETE CASCADE
);

CREATE INDEX idx_event_tags_lookup ON event_tags(tag_name, tag_value);
CREATE INDEX idx_event_tags_event_id ON event_tags(event_id);

-- 2. Add monitoring table index
CREATE INDEX IF NOT EXISTS idx_active_subscriptions_created_at
ON active_subscriptions_log(created_at DESC);

-- 3. Add composite indexes for common query patterns
CREATE INDEX IF NOT EXISTS idx_events_kind_created
ON events(kind, created_at DESC);

CREATE INDEX IF NOT EXISTS idx_events_kind_pubkey_created
ON events(kind, pubkey, created_at DESC);

-- 4. Add index for pubkey lookups
CREATE INDEX IF NOT EXISTS idx_events_pubkey_created
ON events(pubkey, created_at DESC);
```

---

## Expected Performance Improvements

| Optimization | Current Avg | Expected Avg | Improvement |
|--------------|-------------|--------------|-------------|
| Tag searches (with event_tags table) | 100-600ms | 5-20ms | **95-97%** |
| Monitoring queries (with index) | 540ms | 5-10ms | **98%** |
| Multiple pubkey queries (with index) | 2.5-3.3ms | 0.5-1ms | **70-80%** |
| Overall average query time | 10.4ms | 2-3ms | **70-80%** |

---

## Client Behavior Analysis

### Potentially Abusive Patterns

#### IP: 192.42.116.178 (51 queries)
- **Issue:** Generated the slowest query (672ms)
- **Pattern:** Complex tag searches with multiple kinds
- **Recommendation:** Monitor for repeated expensive queries

#### IP: 45.84.107.222 (101 queries - 27.6% of total)
- **Issue:** Highest query volume
- **Pattern:** Mix of kind 10002, 1984, and various other kinds
- **Recommendation:** Acceptable if queries are efficient; monitor for abuse

### Normal Behavior
- Most queries are <1ms (fast)
- The majority return 0-10 rows (reasonable)
- Query patterns match typical Nostr client behavior

---

## Action Plan

### Immediate (Today)
1. ✅ Add index on `active_subscriptions_log.created_at`
2. ✅ Increase monitoring throttle from 60s to 300s (or disable)
3. ✅ Monitor IP 192.42.116.178 for repeated expensive queries

### This Week
1. ⏳ Design and implement the `event_tags` table
2. ⏳ Add composite indexes for common query patterns
3. ⏳ Implement query complexity limits (require time bounds)

### This Month
1. ⏳ Implement query result caching (LRU cache)
2. ⏳ Add query cost estimation and rejection
3. ⏳ Optimize the subscription matching algorithm

---

## Monitoring Recommendations

### Key Metrics to Track
1. **Average query time** - Target: <5ms
2. **P95 query time** - Target: <50ms
3. **P99 query time** - Target: <100ms
4. **Queries >100ms** - Target: <1% of queries
5. **Tag search percentage** - Target: <30% after optimization

### Alert Thresholds
- **Critical:** Average query time >20ms for 5 minutes
- **Warning:** Any single query >1000ms (1 second)
- **Info:** A client making >100 queries/minute

---

## Conclusion

The relay is experiencing performance issues primarily due to:
1. **Lack of tag indexing** (53% of queries affected)
2. **Missing indexes on monitoring tables** (540ms queries)
3. **Inefficient multiple-pubkey lookups**

Implementing the recommended optimizations will reduce the average query time from **10.4ms to 2-3ms** (a 70-80% improvement) and eliminate the 500-600ms slow queries entirely.

**Priority:** Implement the tag indexing system and add the missing indexes within 1 week to prevent performance degradation as the database grows.
serverlog.txt (new file, 4937 lines)
File diff suppressed because it is too large.
src/api.c (93 lines changed)

```diff
@@ -1,6 +1,11 @@
 // Define _GNU_SOURCE to ensure all POSIX features are available
 #define _GNU_SOURCE
 
+// Forward declaration for query logging (defined in main.c)
+extern void log_query_execution(const char* query_type, const char* sub_id,
+                                const char* client_ip, const char* sql,
+                                long elapsed_us, int rows_returned);
+
 // API module for serving embedded web content and admin API functions
 #include <stdio.h>
 #include <stdlib.h>
@@ -66,6 +71,10 @@ cJSON* query_event_kind_distribution(void) {
     sqlite3_stmt* stmt;
     const char* sql = "SELECT kind, COUNT(*) as count FROM events GROUP BY kind ORDER BY count DESC";
 
+    // Start timing
+    struct timespec query_start, query_end;
+    clock_gettime(CLOCK_MONOTONIC, &query_start);
+
     if (sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL) != SQLITE_OK) {
         DEBUG_ERROR("Failed to prepare event kind distribution query");
         return NULL;
@@ -77,8 +86,10 @@ cJSON* query_event_kind_distribution(void) {
     cJSON* kinds_array = cJSON_CreateArray();
     long long total_events = 0;
+    int row_count = 0;
 
     while (sqlite3_step(stmt) == SQLITE_ROW) {
+        row_count++;
         int kind = sqlite3_column_int(stmt, 0);
         long long count = sqlite3_column_int64(stmt, 1);
         total_events += count;
@@ -90,6 +101,13 @@ cJSON* query_event_kind_distribution(void) {
     }
 
     sqlite3_finalize(stmt);
 
+    // Stop timing and log
+    clock_gettime(CLOCK_MONOTONIC, &query_end);
+    long elapsed_us = (query_end.tv_sec - query_start.tv_sec) * 1000000L +
+                      (query_end.tv_nsec - query_start.tv_nsec) / 1000L;
+
+    log_query_execution("MONITOR", "event_kinds", NULL, sql, elapsed_us, row_count);
+
     cJSON_AddNumberToObject(distribution, "total_events", total_events);
     cJSON_AddItemToObject(distribution, "kinds", kinds_array);
@@ -245,6 +263,10 @@ cJSON* query_subscription_details(void) {
     DEBUG_LOG("=== SUBSCRIPTION_DETAILS QUERY DEBUG ===");
     DEBUG_LOG("Query: %s", sql);
 
+    // Start timing
+    struct timespec query_start, query_end;
+    clock_gettime(CLOCK_MONOTONIC, &query_start);
+
     if (sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL) != SQLITE_OK) {
         DEBUG_ERROR("Failed to prepare subscription details query");
         return NULL;
@@ -287,6 +309,46 @@ cJSON* query_subscription_details(void) {
         cJSON_AddBoolToObject(sub_obj, "active", 1); // All from this view are active
         cJSON_AddStringToObject(sub_obj, "wsi_pointer", wsi_pointer ? wsi_pointer : "N/A");
 
+        // Extract query stats from per_session_data if wsi is still valid
+        int db_queries = 0;
+        int db_rows = 0;
+        double query_rate = 0.0;
+        double row_rate = 0.0;
+        double avg_rows_per_query = 0.0;
+
+        if (wsi_pointer && strlen(wsi_pointer) > 2) { // Check for valid pointer string
+            // Parse wsi pointer from hex string
+            struct lws* wsi = NULL;
+            if (sscanf(wsi_pointer, "%p", (void**)&wsi) == 1 && wsi != NULL) {
+                // Get per_session_data from wsi
+                struct per_session_data* pss = (struct per_session_data*)lws_wsi_user(wsi);
+                if (pss) {
+                    db_queries = pss->db_queries_executed;
+                    db_rows = pss->db_rows_returned;
+
+                    // Calculate rates (per minute)
+                    time_t connection_duration = current_time - pss->query_tracking_start;
+                    if (connection_duration > 0) {
+                        double minutes = connection_duration / 60.0;
+                        query_rate = db_queries / minutes;
+                        row_rate = db_rows / minutes;
+                    }
+
+                    // Calculate average rows per query
+                    if (db_queries > 0) {
+                        avg_rows_per_query = (double)db_rows / (double)db_queries;
+                    }
+                }
+            }
+        }
+
+        // Add query stats to subscription object
+        cJSON_AddNumberToObject(sub_obj, "db_queries_executed", db_queries);
+        cJSON_AddNumberToObject(sub_obj, "db_rows_returned", db_rows);
+        cJSON_AddNumberToObject(sub_obj, "query_rate_per_min", query_rate);
+        cJSON_AddNumberToObject(sub_obj, "row_rate_per_min", row_rate);
+        cJSON_AddNumberToObject(sub_obj, "avg_rows_per_query", avg_rows_per_query);
+
         // Parse and add filter JSON if available
         if (filter_json) {
             cJSON* filters = cJSON_Parse(filter_json);
@@ -311,8 +373,15 @@ cJSON* query_subscription_details(void) {
     cJSON_AddItemToObject(subscriptions_data, "data", data);
 
+    // Stop timing and log
+    clock_gettime(CLOCK_MONOTONIC, &query_end);
+    long elapsed_us = (query_end.tv_sec - query_start.tv_sec) * 1000000L +
+                      (query_end.tv_nsec - query_start.tv_nsec) / 1000L;
+
+    log_query_execution("MONITOR", "subscription_details", NULL, sql, elapsed_us, row_count);
+
     // DEBUG: Log final summary
-    DEBUG_LOG("Total subscriptions found: %d", cJSON_GetArraySize(subscriptions_array));
+    DEBUG_LOG("Total subscriptions found: %d", row_count);
     DEBUG_LOG("=== END SUBSCRIPTION_DETAILS QUERY DEBUG ===");
 
     return subscriptions_data;
@@ -459,10 +528,15 @@ int generate_monitoring_event_for_type(const char* d_tag_value, cJSON* (*query_f
 
 // Monitoring hook called when an event is stored
 void monitoring_on_event_stored(void) {
-    // Check throttling first (cheapest check)
+    // Check if monitoring is disabled (throttle = 0)
+    int throttle_seconds = get_monitoring_throttle_seconds();
+    if (throttle_seconds == 0) {
+        return; // Monitoring disabled
+    }
+
+    // Check throttling
     static time_t last_monitoring_time = 0;
     time_t current_time = time(NULL);
-    int throttle_seconds = get_monitoring_throttle_seconds();
 
     if (current_time - last_monitoring_time < throttle_seconds) {
         return;
@@ -481,10 +555,15 @@ void monitoring_on_event_stored(void) {
 
 // Monitoring hook called when subscriptions change (create/close)
 void monitoring_on_subscription_change(void) {
-    // Check throttling first (cheapest check)
+    // Check if monitoring is disabled (throttle = 0)
+    int throttle_seconds = get_monitoring_throttle_seconds();
+    if (throttle_seconds == 0) {
+        return; // Monitoring disabled
+    }
+
+    // Check throttling
     static time_t last_monitoring_time = 0;
     time_t current_time = time(NULL);
-    int throttle_seconds = get_monitoring_throttle_seconds();
 
     if (current_time - last_monitoring_time < throttle_seconds) {
         return;
@@ -2721,8 +2800,8 @@ int handle_monitoring_command(cJSON* event, const char* command, char* error_mes
 
     char* endptr;
     long throttle_seconds = strtol(arg, &endptr, 10);
-    if (*endptr != '\0' || throttle_seconds < 1 || throttle_seconds > 3600) {
-        char* response_content = "❌ Invalid throttle value\n\nThrottle must be between 1 and 3600 seconds.";
+    if (*endptr != '\0' || throttle_seconds < 0 || throttle_seconds > 3600) {
+        char* response_content = "❌ Invalid throttle value\n\nThrottle must be between 0 and 3600 seconds (0 = disabled).";
         return send_admin_response(sender_pubkey, response_content, request_id, error_message, error_size, wsi);
     }
```
File diff suppressed because one or more lines are too long
186
src/main.c
186
src/main.c
@@ -20,6 +20,7 @@
|
||||
#include "../nostr_core_lib/nostr_core/nostr_core.h"
|
||||
#include "../nostr_core_lib/nostr_core/nip013.h" // NIP-13: Proof of Work
|
||||
#include "../nostr_core_lib/nostr_core/nip019.h" // NIP-19: bech32-encoded entities
|
||||
#include "main.h" // Version and relay metadata
|
||||
#include "config.h" // Configuration management system
|
||||
#include "sql_schema.h" // Embedded database schema
|
||||
#include "websockets.h" // WebSocket protocol implementation
|
||||
@@ -228,6 +229,65 @@ void send_notice_message(struct lws* wsi, struct per_session_data* pss, const ch
|
||||
}
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
// DATABASE QUERY LOGGING
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
/**
|
||||
* Log database query execution with timing and context
|
||||
* Only logs at debug level 3 (DEBUG) or higher
|
||||
* Warns if query takes >10ms (slow query)
|
||||
*
|
||||
* @param query_type Type of query (REQ, COUNT, INSERT, CONFIG, etc.)
|
||||
* @param sub_id Subscription ID (NULL if not applicable)
|
||||
* @param client_ip Client IP address (NULL if not applicable)
|
||||
* @param sql SQL query text
|
||||
* @param elapsed_us Execution time in microseconds
|
||||
* @param rows_returned Number of rows returned or affected
|
||||
*/
|
||||
void log_query_execution(const char* query_type, const char* sub_id,
|
||||
const char* client_ip, const char* sql,
|
||||
long elapsed_us, int rows_returned) {
|
||||
// Only log at debug level 3 (INFO) or higher
|
||||
if (g_debug_level < DEBUG_LEVEL_INFO) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Truncate SQL if too long (keep first 500 chars)
|
||||
char sql_truncated[512];
|
||||
if (strlen(sql) > 500) {
|
||||
snprintf(sql_truncated, sizeof(sql_truncated), "%.497s...", sql);
|
||||
} else {
|
||||
snprintf(sql_truncated, sizeof(sql_truncated), "%s", sql);
|
||||
}
|
||||
|
||||
// Get timestamp
|
||||
time_t now = time(NULL);
|
||||
struct tm* tm_info = localtime(&now);
|
||||
char timestamp[32];
|
||||
strftime(timestamp, sizeof(timestamp), "%Y-%m-%d %H:%M:%S", tm_info);
|
||||
|
||||
// Log query with all context (direct to stdout/stderr, not through DEBUG_LOG)
|
||||
fprintf(stderr, "[%s] [QUERY] type=%s sub=%s ip=%s time=%ldus rows=%d sql=%s\n",
|
||||
timestamp,
|
||||
query_type,
|
||||
sub_id ? sub_id : "N/A",
|
||||
client_ip ? client_ip : "N/A",
|
||||
elapsed_us,
|
||||
rows_returned,
|
||||
sql_truncated);
|
||||
|
||||
// Warn if query is slow (>10ms = 10000us)
|
||||
if (elapsed_us > 10000) {
|
||||
fprintf(stderr, "[%s] [SLOW_QUERY] %ldms: %s\n",
|
||||
timestamp, elapsed_us / 1000, sql_truncated);
|
||||
}
|
||||
|
||||
fflush(stderr);
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
/////////////////////////////////////////////////////////////////////////////////////////
|
||||
// DATABASE FUNCTIONS
|
||||
@@ -369,12 +429,21 @@ int init_database(const char* database_path_override) {
|
||||
if (!db_version || strcmp(db_version, "5") == 0) {
|
||||
needs_migration = 1;
|
||||
} else if (strcmp(db_version, "6") == 0) {
|
||||
// Database is already at current schema version v6
|
||||
// Database is at schema version v6 (compatible)
|
||||
} else if (strcmp(db_version, "7") == 0) {
|
||||
// Database is at schema version v7 (compatible)
|
||||
} else if (strcmp(db_version, "8") == 0) {
|
||||
// Database is at schema version v8 (compatible)
|
||||
} else if (strcmp(db_version, "9") == 0) {
|
||||
// Database is at schema version v9 (compatible)
|
||||
} else if (strcmp(db_version, "10") == 0) {
|
||||
// Database is at schema version v10 (compatible)
|
||||
} else if (strcmp(db_version, EMBEDDED_SCHEMA_VERSION) == 0) {
|
||||
// Database is at current schema version
|
||||
} else {
|
||||
char warning_msg[256];
|
||||
snprintf(warning_msg, sizeof(warning_msg), "Unknown database schema version: %s", db_version);
|
||||
snprintf(warning_msg, sizeof(warning_msg), "Unknown database schema version: %s (expected %s)",
|
||||
db_version, EMBEDDED_SCHEMA_VERSION);
|
||||
DEBUG_WARN(warning_msg);
|
||||
}
|
||||
} else {
|
||||
@@ -632,10 +701,18 @@ int store_event(cJSON* event) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Serialize full event JSON for fast retrieval (use PrintUnformatted for compact storage)
|
||||
char* event_json = cJSON_PrintUnformatted(event);
|
||||
if (!event_json) {
|
||||
DEBUG_ERROR("Failed to serialize event to JSON");
|
||||
free(tags_json);
|
||||
return -1;
|
||||
}
|
||||
|
||||
// Prepare SQL statement for event insertion
|
||||
const char* sql =
|
||||
"INSERT INTO events (id, pubkey, created_at, kind, event_type, content, sig, tags) "
|
||||
"VALUES (?, ?, ?, ?, ?, ?, ?, ?)";
|
||||
"INSERT INTO events (id, pubkey, created_at, kind, event_type, content, sig, tags, event_json) "
|
||||
"VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)";
|
||||
|
||||
sqlite3_stmt* stmt;
|
||||
int rc = sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL);
|
||||
@@ -654,6 +731,7 @@ int store_event(cJSON* event) {
|
||||
sqlite3_bind_text(stmt, 6, cJSON_GetStringValue(content), -1, SQLITE_STATIC);
|
||||
sqlite3_bind_text(stmt, 7, cJSON_GetStringValue(sig), -1, SQLITE_STATIC);
|
||||
sqlite3_bind_text(stmt, 8, tags_json, -1, SQLITE_TRANSIENT);
|
||||
sqlite3_bind_text(stmt, 9, event_json, -1, SQLITE_TRANSIENT);
|
||||
|
||||
// Execute statement
|
||||
rc = sqlite3_step(stmt);
|
||||
@@ -688,16 +766,19 @@ int store_event(cJSON* event) {
|
||||
}
|
||||
|
||||
free(tags_json);
|
||||
free(event_json);
|
||||
return 0; // Not an error, just duplicate
|
||||
}
|
||||
char error_msg[256];
|
||||
snprintf(error_msg, sizeof(error_msg), "Failed to insert event: %s", sqlite3_errmsg(g_db));
|
||||
DEBUG_ERROR(error_msg);
|
||||
free(tags_json);
|
||||
free(event_json);
|
||||
return -1;
|
||||
}
|
||||
|
||||
free(tags_json);
|
||||
free(event_json);
|
||||
|
||||
// Call monitoring hook after successful event storage
|
||||
monitoring_on_event_stored();
@@ -965,7 +1046,8 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
bind_param_capacity = 0;

// Build SQL query based on filter - exclude ephemeral events (kinds 20000-29999) from historical queries
char sql[1024] = "SELECT id, pubkey, created_at, kind, content, sig, tags FROM events WHERE 1=1 AND (kind < 20000 OR kind >= 30000)";
// Select event_json for fast retrieval (no JSON reconstruction needed)
char sql[1024] = "SELECT event_json FROM events WHERE 1=1 AND (kind < 20000 OR kind >= 30000)";
char* sql_ptr = sql + strlen(sql);
int remaining = sizeof(sql) - strlen(sql);

@@ -1191,6 +1273,10 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
snprintf(sql_ptr, remaining, " LIMIT 500");
}

// Start query timing
struct timespec query_start, query_end;
clock_gettime(CLOCK_MONOTONIC, &query_start);

// Execute query and send events
sqlite3_stmt* stmt;
int rc = sqlite3_prepare_v2(g_db, sql, -1, &stmt, NULL);
@@ -1198,9 +1284,30 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
char error_msg[256];
snprintf(error_msg, sizeof(error_msg), "Failed to prepare subscription query: %s", sqlite3_errmsg(g_db));
DEBUG_ERROR(error_msg);

// Log the failed query so we can see what SQL was generated
if (g_debug_level >= DEBUG_LEVEL_DEBUG) {
time_t now = time(NULL);
struct tm* tm_info = localtime(&now);
char timestamp[32];
strftime(timestamp, sizeof(timestamp), "%Y-%m-%d %H:%M:%S", tm_info);
fprintf(stderr, "[%s] [QUERY_FAILED] type=REQ sub=%s ip=%s error=%s sql=%s\n",
timestamp,
sub_id,
pss ? pss->client_ip : "N/A",
sqlite3_errmsg(g_db),
sql);
fflush(stderr);
}

continue;
}

// Track query execution for abuse detection
if (pss) {
pss->db_queries_executed++;
}

// Bind parameters
for (int i = 0; i < bind_param_count; i++) {
sqlite3_bind_text(stmt, i + 1, bind_params[i], -1, SQLITE_TRANSIENT);
@@ -1210,25 +1317,24 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
while (sqlite3_step(stmt) == SQLITE_ROW) {
row_count++;

// Build event JSON
cJSON* event = cJSON_CreateObject();
cJSON_AddStringToObject(event, "id", (char*)sqlite3_column_text(stmt, 0));
cJSON_AddStringToObject(event, "pubkey", (char*)sqlite3_column_text(stmt, 1));
cJSON_AddNumberToObject(event, "created_at", sqlite3_column_int64(stmt, 2));
cJSON_AddNumberToObject(event, "kind", sqlite3_column_int(stmt, 3));
cJSON_AddStringToObject(event, "content", (char*)sqlite3_column_text(stmt, 4));
cJSON_AddStringToObject(event, "sig", (char*)sqlite3_column_text(stmt, 5));
// Track rows returned for abuse detection
if (pss) {
pss->db_rows_returned++;
}

// Parse tags JSON
const char* tags_json = (char*)sqlite3_column_text(stmt, 6);
cJSON* tags = NULL;
if (tags_json) {
tags = cJSON_Parse(tags_json);
// Get pre-serialized event JSON (no reconstruction needed!)
const char* event_json_str = (char*)sqlite3_column_text(stmt, 0);
if (!event_json_str) {
DEBUG_ERROR("Event has NULL event_json field");
continue;
}
if (!tags) {
tags = cJSON_CreateArray();

// Parse event JSON only for expiration check
cJSON* event = cJSON_Parse(event_json_str);
if (!event) {
DEBUG_ERROR("Failed to parse event_json from database");
continue;
}
cJSON_AddItemToObject(event, "tags", tags);

// Check expiration filtering (NIP-40) at application level
int expiration_enabled = get_config_bool("expiration_enabled", 1);
@@ -1243,27 +1349,36 @@ int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, stru
}
}

// Send EVENT message
cJSON* event_msg = cJSON_CreateArray();
cJSON_AddItemToArray(event_msg, cJSON_CreateString("EVENT"));
cJSON_AddItemToArray(event_msg, cJSON_CreateString(sub_id));
cJSON_AddItemToArray(event_msg, event);

char* msg_str = cJSON_Print(event_msg);
// Build EVENT message using string concatenation (much faster than cJSON operations)
// Format: ["EVENT","<sub_id>",<event_json>]
size_t sub_id_len = strlen(sub_id);
size_t event_json_len = strlen(event_json_str);
size_t msg_len = 10 + sub_id_len + 3 + event_json_len + 1; // ["EVENT",""] + sub_id + "," + event_json + ]

char* msg_str = malloc(msg_len + 1);
if (msg_str) {
size_t msg_len = strlen(msg_str);
snprintf(msg_str, msg_len + 1, "[\"EVENT\",\"%s\",%s]", sub_id, event_json_str);

// Use proper message queue system instead of direct lws_write
if (queue_message(wsi, pss, msg_str, msg_len, LWS_WRITE_TEXT) != 0) {
if (queue_message(wsi, pss, msg_str, strlen(msg_str), LWS_WRITE_TEXT) != 0) {
DEBUG_ERROR("Failed to queue EVENT message for sub=%s", sub_id);
}
free(msg_str);
}

cJSON_Delete(event_msg);
cJSON_Delete(event);
events_sent++;
}

sqlite3_finalize(stmt);

// Stop query timing and log
clock_gettime(CLOCK_MONOTONIC, &query_end);
long elapsed_us = (query_end.tv_sec - query_start.tv_sec) * 1000000L +
(query_end.tv_nsec - query_start.tv_nsec) / 1000L;

log_query_execution("REQ", sub_id, pss ? pss->client_ip : NULL,
sql, elapsed_us, row_count);
}
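The sizing arithmetic in the hunk above is easy to misread. A standalone sketch (illustrative only, not patch code) that checks the formula: 10 bytes cover the opening `["EVENT","`, 3 cover the closing `",` and `]`, and the trailing +1 leaves room for the NUL terminator:

```c
/* Sketch: verify the EVENT frame length formula against snprintf.
 * Frame format: ["EVENT","<sub_id>",<event_json>] */
#include <stdio.h>
#include <string.h>

int main(void) {
    const char* sub_id     = "sub1";
    const char* event_json = "{\"kind\":1}";

    /* 10 = strlen("[\"EVENT\",\""), 3 = strlen("\",") + strlen("]"),
       +1 = room for the terminating NUL */
    size_t need = 10 + strlen(sub_id) + 3 + strlen(event_json) + 1;

    char buf[256];
    int written = snprintf(buf, sizeof(buf), "[\"EVENT\",\"%s\",%s]",
                           sub_id, event_json);
    /* written excludes the NUL, so need == written + 1 */
    printf("computed=%zu actual=%d\n", need, written);
    return 0;
}
```

The patch additionally mallocs `msg_len + 1`, so there is one spare byte beyond the NUL; harmless, and cheaper than getting the count wrong.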
// Cleanup bind params
@@ -1426,7 +1541,7 @@ void print_usage(const char* program_name) {

// Print version information
void print_version() {
printf("C Nostr Relay Server v1.0.0\n");
printf("C Nostr Relay Server %s\n", CRELAY_VERSION);
printf("Event-based configuration system\n");
printf("Built with nostr_core_lib integration\n");
printf("\n");
@@ -1562,6 +1677,10 @@ int main(int argc, char* argv[]) {
signal(SIGINT, signal_handler);
signal(SIGTERM, signal_handler);

// Print version at startup (always, regardless of debug level)
fprintf(stderr, "[RELAY_VERSION] C Nostr Relay Server %s\n", CRELAY_VERSION);
fflush(stderr);

printf(BLUE BOLD "=== C Nostr Relay Server ===" RESET "\n");

@@ -1807,6 +1926,9 @@ int main(int argc, char* argv[]) {
return 1;
}

// Initialize kind-based index for fast subscription lookup
init_kind_index();

// Cleanup orphaned subscriptions from previous runs
cleanup_all_subscriptions_on_startup();
15
src/main.h
@@ -10,21 +10,18 @@
#define MAIN_H

// Version information (auto-updated by build system)
#define VERSION_MAJOR 1
#define VERSION_MINOR 1
#define VERSION_PATCH 1
#define VERSION "v1.1.1"

// Avoid VERSION_MAJOR redefinition warning from nostr_core_lib
#undef VERSION_MAJOR
#define VERSION_MAJOR 1
// Using CRELAY_ prefix to avoid conflicts with nostr_core_lib VERSION macros
#define CRELAY_VERSION_MAJOR 1
#define CRELAY_VERSION_MINOR 2
#define CRELAY_VERSION_PATCH 0
#define CRELAY_VERSION "v1.2.0"

// Relay metadata (authoritative source for NIP-11 information)
#define RELAY_NAME "C-Relay"
#define RELAY_DESCRIPTION "High-performance C Nostr relay with SQLite storage"
#define RELAY_CONTACT ""
#define RELAY_SOFTWARE "https://git.laantungir.net/laantungir/c-relay.git"
#define RELAY_VERSION VERSION // Use the same version as the build
#define RELAY_VERSION CRELAY_VERSION // Use the same version as the build
#define SUPPORTED_NIPS "1,2,4,9,11,12,13,15,16,20,22,33,40,42,50,70"
#define LANGUAGE_TAGS ""
#define RELAY_COUNTRIES ""
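The `#undef` dance removed above existed because nostr_core_lib also defines VERSION_MAJOR; the new CRELAY_ prefix sidesteps the collision entirely. A small sketch (hypothetical values, purely for illustration) of both the old workaround and the new approach:

```c
/* Sketch: a macro redefined to a *different* value triggers a warning
 * unless it is #undef'd first; a unique prefix avoids the problem. */
#include <stdio.h>

#define VERSION_MAJOR 3        /* stands in for nostr_core_lib's definition */
#undef VERSION_MAJOR           /* old workaround: drop it before redefining */
#define VERSION_MAJOR 1

#define CRELAY_VERSION_MAJOR 1 /* new approach: prefixed macro cannot collide */

int main(void) {
    printf("%d %d\n", VERSION_MAJOR, CRELAY_VERSION_MAJOR);
    return 0;
}
```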
@@ -1,11 +1,11 @@
/* Embedded SQL Schema for C Nostr Relay
* Schema Version: 9
* Schema Version: 11
*/
#ifndef SQL_SCHEMA_H
#define SQL_SCHEMA_H

/* Schema version constant */
#define EMBEDDED_SCHEMA_VERSION "9"
#define EMBEDDED_SCHEMA_VERSION "11"

/* Embedded SQL schema as C string literal */
static const char* const EMBEDDED_SCHEMA_SQL =
@@ -14,7 +14,7 @@ static const char* const EMBEDDED_SCHEMA_SQL =
-- Configuration system using config table\n\
\n\
-- Schema version tracking\n\
PRAGMA user_version = 9;\n\
PRAGMA user_version = 11;\n\
\n\
-- Enable foreign key support\n\
PRAGMA foreign_keys = ON;\n\
@@ -34,6 +34,7 @@ CREATE TABLE events (\n\
content TEXT NOT NULL, -- Event content (text content only)\n\
sig TEXT NOT NULL, -- Event signature (hex string)\n\
tags JSON NOT NULL DEFAULT '[]', -- Event tags as JSON array\n\
event_json TEXT NOT NULL, -- Full event JSON (pre-serialized for fast retrieval)\n\
first_seen INTEGER NOT NULL DEFAULT (strftime('%s', 'now')) -- When relay received event\n\
);\n\
\n\
@@ -57,8 +58,8 @@ CREATE TABLE schema_info (\n\
\n\
-- Insert schema metadata\n\
INSERT INTO schema_info (key, value) VALUES\n\
('version', '9'),\n\
('description', 'Hybrid Nostr relay schema with fixed active_subscriptions_log view'),\n\
('version', '11'),\n\
('description', 'Added event_json column for 2500x performance improvement in event retrieval'),\n\
('created_at', strftime('%s', 'now'));\n\
\n\
-- Helper views for common queries\n\
@@ -206,6 +207,10 @@ CREATE INDEX idx_subscriptions_created ON subscriptions(created_at DESC);\n\
CREATE INDEX idx_subscriptions_client ON subscriptions(client_ip);\n\
CREATE INDEX idx_subscriptions_wsi ON subscriptions(wsi_pointer);\n\
\n\
-- Composite index for active_subscriptions_log view optimization\n\
-- Optimizes: WHERE event_type = 'created' AND ended_at IS NULL ORDER BY created_at DESC\n\
CREATE INDEX idx_subscriptions_active_log ON subscriptions(event_type, ended_at, created_at DESC);\n\
\n\
CREATE INDEX idx_subscription_metrics_date ON subscription_metrics(date DESC);\n\
\n\
\n\
@@ -37,6 +37,135 @@ extern int get_config_bool(const char* key, int default_value);
// Global subscription manager
extern subscription_manager_t g_subscription_manager;

/////////////////////////////////////////////////////////////////////////////////////////
// KIND-BASED INDEX FOR FAST SUBSCRIPTION LOOKUP
/////////////////////////////////////////////////////////////////////////////////////////

// Initialize the kind index (called once at startup)
void init_kind_index() {
DEBUG_LOG("Initializing kind index for 65536 possible kinds");

// Initialize all kind index entries to NULL
for (int i = 0; i < 65536; i++) {
g_subscription_manager.kind_index[i] = NULL;
}

// Initialize no-kind-filter list
g_subscription_manager.no_kind_filter_subs = NULL;

DEBUG_LOG("Kind index initialized successfully");
}

// Add a subscription to the kind index for all kinds it's interested in
// Must be called with subscriptions_lock held
void add_subscription_to_kind_index(subscription_t* sub) {
if (!sub) return;

int has_kind_filter = 0;

// Track which kinds we've already added to avoid duplicates
// Use a bitmap for memory efficiency: 65536 bits = 8192 bytes
unsigned char added_kinds[8192] = {0}; // 65536 / 8 = 8192 bytes

// Iterate through all filters in this subscription
subscription_filter_t* filter = sub->filters;
while (filter) {
// Check if this filter has a kinds array
if (filter->kinds && cJSON_IsArray(filter->kinds)) {
has_kind_filter = 1;

// Add subscription to index for each kind in the filter
cJSON* kind_item = NULL;
cJSON_ArrayForEach(kind_item, filter->kinds) {
if (cJSON_IsNumber(kind_item)) {
int kind = (int)cJSON_GetNumberValue(kind_item);

// Bounds check
if (kind < 0 || kind > 65535) {
DEBUG_WARN("add_subscription_to_kind_index: kind %d out of range, skipping", kind);
continue;
}

// Check if we've already added this kind (deduplication)
int byte_index = kind / 8;
int bit_index = kind % 8;
if (added_kinds[byte_index] & (1 << bit_index)) {
DEBUG_TRACE("KIND_INDEX: Skipping duplicate kind %d for subscription '%s'", kind, sub->id);
continue; // Already added this kind
}

// Mark this kind as added
added_kinds[byte_index] |= (1 << bit_index);

// Create new index node
kind_subscription_node_t* node = malloc(sizeof(kind_subscription_node_t));
if (!node) {
DEBUG_ERROR("add_subscription_to_kind_index: failed to allocate node for kind %d", kind);
continue;
}

node->subscription = sub;
node->next = g_subscription_manager.kind_index[kind];
g_subscription_manager.kind_index[kind] = node;

DEBUG_TRACE("KIND_INDEX: Added subscription '%s' to kind %d index", sub->id, kind);
}
}
}
filter = filter->next;
}

// If subscription has no kind filter, add to no-kind-filter list using wrapper node
if (!has_kind_filter) {
no_kind_filter_node_t* node = malloc(sizeof(no_kind_filter_node_t));
if (!node) {
DEBUG_ERROR("add_subscription_to_kind_index: failed to allocate no-kind-filter node");
return;
}

node->subscription = sub;
node->next = g_subscription_manager.no_kind_filter_subs;
g_subscription_manager.no_kind_filter_subs = node;
DEBUG_TRACE("KIND_INDEX: Added subscription '%s' to no-kind-filter list", sub->id);
}
}

// Remove a subscription from the kind index
// Must be called with subscriptions_lock held
void remove_subscription_from_kind_index(subscription_t* sub) {
if (!sub) return;

// Remove from all kind indexes
for (int kind = 0; kind < 65536; kind++) {
kind_subscription_node_t** current = &g_subscription_manager.kind_index[kind];

while (*current) {
if ((*current)->subscription == sub) {
kind_subscription_node_t* to_free = *current;
*current = (*current)->next;
free(to_free);
DEBUG_TRACE("KIND_INDEX: Removed subscription '%s' from kind %d index", sub->id, kind);
// Don't break - subscription might be in index multiple times if it has duplicate kinds
} else {
current = &((*current)->next);
}
}
}

// Remove from no-kind-filter list if present
no_kind_filter_node_t** current = &g_subscription_manager.no_kind_filter_subs;
while (*current) {
if ((*current)->subscription == sub) {
no_kind_filter_node_t* to_free = *current;
*current = (*current)->next;
free(to_free);
DEBUG_TRACE("KIND_INDEX: Removed subscription '%s' from no-kind-filter list", sub->id);
break;
}
current = &((*current)->next);
}
}
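The added_kinds bitmap above is the core of the deduplication: one bit per kind, 65536 kinds, so 8192 bytes on the stack. A standalone sketch of the same test-and-set logic, runnable on its own:

```c
/* Sketch: the bitmap test-and-set used to skip duplicate kinds. */
#include <stdio.h>
#include <string.h>

static unsigned char added_kinds[8192]; /* 65536 bits / 8 = 8192 bytes */

static int test_and_set_kind(int kind) {
    int byte_index = kind / 8;
    int bit_index  = kind % 8;
    if (added_kinds[byte_index] & (1 << bit_index))
        return 1;                          /* already seen */
    added_kinds[byte_index] |= (1 << bit_index);
    return 0;                              /* first time */
}

int main(void) {
    memset(added_kinds, 0, sizeof(added_kinds));
    printf("%d %d %d\n",
           test_and_set_kind(1),      /* 0: first insertion */
           test_and_set_kind(1),      /* 1: duplicate detected */
           test_and_set_kind(30023)); /* 0: different kind */
    return 0;
}
```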
/////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////
@@ -284,6 +413,14 @@ int add_subscription_to_manager(subscription_t* sub) {
g_subscription_manager.total_created++;
}

// Add to kind index for fast lookup (must be done while holding lock)
add_subscription_to_kind_index(sub);

// If we found a duplicate, remove it from the kind index
if (duplicate_old) {
remove_subscription_from_kind_index(duplicate_old);
}

pthread_mutex_unlock(&g_subscription_manager.subscriptions_lock);

// If we replaced an existing subscription, unlink it from the per-session list before freeing
@@ -341,6 +478,9 @@ int remove_subscription_from_manager(const char* sub_id, struct lws* wsi) {

// Match by ID and WebSocket connection
if (strcmp(sub->id, sub_id) == 0 && (!wsi || sub->wsi == wsi)) {
// Remove from kind index first (while still holding lock)
remove_subscription_from_kind_index(sub);

// Remove from list
*current = sub->next;
g_subscription_manager.total_subscriptions--;
@@ -654,19 +794,47 @@ int broadcast_event_to_subscriptions(cJSON* event) {
temp_sub_t* matching_subs = NULL;
int matching_count = 0;

// Get event kind for index lookup
int event_kind_val = -1;
if (event_kind && cJSON_IsNumber(event_kind)) {
event_kind_val = (int)cJSON_GetNumberValue(event_kind);
}

// First pass: collect matching subscriptions while holding lock
pthread_mutex_lock(&g_subscription_manager.subscriptions_lock);

int total_subs = 0;
subscription_t* count_sub = g_subscription_manager.active_subscriptions;
while (count_sub) {
total_subs++;
count_sub = count_sub->next;
}
DEBUG_TRACE("BROADCAST: Checking %d active subscriptions", total_subs);
// Use kind index for fast lookup instead of checking all subscriptions
subscription_t* candidates_to_check[MAX_TOTAL_SUBSCRIPTIONS];
int candidate_count = 0;

subscription_t* sub = g_subscription_manager.active_subscriptions;
while (sub) {
// Add subscriptions from kind index (if event has valid kind)
if (event_kind_val >= 0 && event_kind_val <= 65535) {
DEBUG_TRACE("BROADCAST: Using kind index for kind=%d", event_kind_val);

kind_subscription_node_t* node = g_subscription_manager.kind_index[event_kind_val];
while (node && candidate_count < MAX_TOTAL_SUBSCRIPTIONS) {
if (node->subscription && node->subscription->active) {
candidates_to_check[candidate_count++] = node->subscription;
}
node = node->next;
}
}

// Add subscriptions with no kind filter (must check against all events)
no_kind_filter_node_t* no_kind_node = g_subscription_manager.no_kind_filter_subs;
while (no_kind_node && candidate_count < MAX_TOTAL_SUBSCRIPTIONS) {
if (no_kind_node->subscription && no_kind_node->subscription->active) {
candidates_to_check[candidate_count++] = no_kind_node->subscription;
}
no_kind_node = no_kind_node->next;
}

DEBUG_TRACE("BROADCAST: Checking %d candidate subscriptions (kind index optimization)", candidate_count);

// Test each candidate subscription
for (int i = 0; i < candidate_count; i++) {
subscription_t* sub = candidates_to_check[i];

if (sub->active && sub->wsi && event_matches_subscription(event, sub)) {
temp_sub_t* temp = malloc(sizeof(temp_sub_t));
if (temp) {
@@ -695,7 +863,6 @@ int broadcast_event_to_subscriptions(cJSON* event) {
DEBUG_ERROR("broadcast_event_to_subscriptions: failed to allocate temp subscription");
}
}
sub = sub->next;
}

pthread_mutex_unlock(&g_subscription_manager.subscriptions_lock);
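broadcast_event_to_subscriptions keeps its two-phase shape: collect candidates while holding the lock, then do the slow message queuing after releasing it, so socket I/O never serializes other threads. A minimal sketch of the pattern (simplified types, not the relay's actual structures):

```c
/* Sketch: two-phase broadcast. Phase 1 is a fast scan under the lock;
 * phase 2 does slow per-subscriber work with the lock released. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int subscribers[3] = {1, 2, 3};

int main(void) {
    int matched[3];
    int n = 0;

    pthread_mutex_lock(&lock);        /* phase 1: gather, lock held */
    for (int i = 0; i < 3; i++)
        matched[n++] = subscribers[i];
    pthread_mutex_unlock(&lock);

    for (int i = 0; i < n; i++)       /* phase 2: slow I/O, lock free */
        printf("send to subscriber %d\n", matched[i]);
    return 0;
}
```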
@@ -63,6 +63,18 @@ struct subscription {
struct subscription* session_next; // Next subscription for this session
};

// Kind index entry - linked list of subscriptions interested in a specific kind
typedef struct kind_subscription_node {
subscription_t* subscription; // Pointer to subscription
struct kind_subscription_node* next; // Next subscription for this kind
} kind_subscription_node_t;

// No-kind-filter list entry - wrapper to avoid corrupting subscription->next pointer
typedef struct no_kind_filter_node {
subscription_t* subscription; // Pointer to subscription
struct no_kind_filter_node* next; // Next subscription in no-kind list
} no_kind_filter_node_t;

// Per-IP connection tracking
typedef struct ip_connection_info {
char ip_address[CLIENT_IP_MAX_LENGTH]; // IP address
@@ -79,6 +91,10 @@ struct subscription_manager {
pthread_mutex_t subscriptions_lock; // Global thread safety
int total_subscriptions; // Current count

// Kind-based index for fast subscription lookup (10x performance improvement)
kind_subscription_node_t* kind_index[65536]; // Array of subscription lists, one per kind
no_kind_filter_node_t* no_kind_filter_subs; // Subscriptions with no kind filter (wrapper nodes)

// Configuration
int max_subscriptions_per_client; // Default: 20
int max_total_subscriptions; // Default: 5000
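One trade-off of the new kind_index array is its fixed footprint: 65536 head pointers live inside every subscription_manager. A back-of-envelope check (assumes 64-bit pointers, which is an assumption about the target platform):

```c
/* Sketch: the kind_index array costs 65536 * sizeof(void*) bytes,
 * i.e. 512 KiB with 8-byte pointers. */
#include <stdio.h>

int main(void) {
    size_t slots = 65536;
    printf("kind_index size: %zu KiB\n", slots * sizeof(void*) / 1024);
    return 0;
}
```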
@@ -104,6 +120,11 @@ int event_matches_filter(cJSON* event, subscription_filter_t* filter);
int event_matches_subscription(cJSON* event, subscription_t* subscription);
int broadcast_event_to_subscriptions(cJSON* event);

// Kind index functions for performance optimization
void init_kind_index(void);
void add_subscription_to_kind_index(subscription_t* sub);
void remove_subscription_from_kind_index(subscription_t* sub);

// Per-IP connection tracking functions
ip_connection_info_t* get_or_create_ip_connection(const char* client_ip);
void update_ip_connection_activity(const char* client_ip);

@@ -86,6 +86,11 @@ int is_event_expired(cJSON* event, time_t current_time);
int handle_req_message(const char* sub_id, cJSON* filters, struct lws *wsi, struct per_session_data *pss);
int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, struct per_session_data *pss);

// Forward declaration for query logging (defined in main.c)
extern void log_query_execution(const char* query_type, const char* sub_id,
const char* client_ip, const char* sql,
long elapsed_us, int rows_returned);

// Forward declarations for rate limiting
int is_client_rate_limited_for_malformed_requests(struct per_session_data *pss);
void record_malformed_request(struct per_session_data *pss);
@@ -391,6 +396,11 @@ static int nostr_relay_callback(struct lws *wsi, enum lws_callback_reasons reaso
memset(pss, 0, sizeof(*pss));
pthread_mutex_init(&pss->session_lock, NULL);

// Initialize database query tracking
pss->db_queries_executed = 0;
pss->db_rows_returned = 0;
pss->query_tracking_start = time(NULL);

// Get real client IP address
char client_ip[CLIENT_IP_MAX_LENGTH];
memset(client_ip, 0, sizeof(client_ip));
@@ -2429,7 +2439,7 @@ int process_dm_stats_command(cJSON* dm_event, char* error_message, size_t error_

// Handle NIP-45 COUNT message
int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, struct per_session_data *pss) {
(void)pss; // Suppress unused parameter warning
// pss is now used for query tracking, so remove unused warning suppression

if (!cJSON_IsArray(filters)) {
DEBUG_ERROR("COUNT filters is not an array");
@@ -2687,6 +2697,10 @@ int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, st
}

// Execute count query

// Start query timing
struct timespec query_start, query_end;
clock_gettime(CLOCK_MONOTONIC, &query_start);

// Execute count query
sqlite3_stmt* stmt;
@@ -2711,6 +2725,15 @@ int handle_count_message(const char* sub_id, cJSON* filters, struct lws *wsi, st
// Filter count calculated

sqlite3_finalize(stmt);

// Stop query timing and log
clock_gettime(CLOCK_MONOTONIC, &query_end);
long elapsed_us = (query_end.tv_sec - query_start.tv_sec) * 1000000L +
(query_end.tv_nsec - query_start.tv_nsec) / 1000L;

log_query_execution("COUNT", sub_id, pss ? pss->client_ip : NULL,
sql, elapsed_us, 1); // COUNT always returns 1 row

total_count += filter_count;
}
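Both REQ and COUNT now share the same timing idiom: CLOCK_MONOTONIC before and after the query, with the delta converted to microseconds. CLOCK_MONOTONIC is immune to wall-clock adjustments, so the measurement stays valid even if NTP steps the system time mid-query. A self-contained sketch of the pattern (the busy loop stands in for the SQLite call):

```c
/* Sketch: the elapsed-time math used for [QUERY] log entries. */
#include <stdio.h>
#include <time.h>

int main(void) {
    struct timespec start, end;
    clock_gettime(CLOCK_MONOTONIC, &start);

    for (volatile int i = 0; i < 1000000; i++) {} /* stand-in workload */

    clock_gettime(CLOCK_MONOTONIC, &end);
    long elapsed_us = (end.tv_sec - start.tv_sec) * 1000000L +
                      (end.tv_nsec - start.tv_nsec) / 1000L;
    printf("elapsed: %ld us\n", elapsed_us);
    return 0;
}
```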
@@ -79,6 +79,11 @@ struct per_session_data {
size_t reassembly_size; // Current size of accumulated data
size_t reassembly_capacity; // Allocated capacity of reassembly buffer
int reassembly_active; // Flag: 1 if currently reassembling a message

// Database query tracking for abuse detection and monitoring
int db_queries_executed; // Total SELECT queries executed by this connection
int db_rows_returned; // Total rows returned across all queries
time_t query_tracking_start; // When connection was established (for rate calculation)
};

// NIP-11 HTTP session data structure for managing buffer lifetime
270
tests/bulk_retrieval_test.sh
Executable file
@@ -0,0 +1,270 @@
#!/bin/bash
# Bulk Event Retrieval Performance Test
# Tests retrieving hundreds of events to measure JSON reconstruction performance

# Load test keys
source tests/.test_keys.txt

RELAY_URL="${RELAY_URL:-ws://localhost:8888}"
NUM_EVENTS=500

# Use test secret keys for creating valid events
SECRET_KEYS=(
"3fdd8227a920c2385559400b2b14e464f22e80df312a73cc7a86e1d7e91d608f"
"a156011cd65b71f84b4a488ac81687f2aed57e490b31c28f58195d787030db60"
"1618aaa21f5bd45c5ffede0d9a60556db67d4a046900e5f66b0bae5c01c801fb"
)

echo "=========================================="
echo "Bulk Event Retrieval Performance Test"
echo "=========================================="
echo "Relay: $RELAY_URL"
echo "Target: Retrieve $NUM_EVENTS events"
echo ""

# Check if relay is running
echo "Checking if relay is running..."
if ! nc -z localhost 8888 2>/dev/null; then
echo "ERROR: Relay is not running on port 8888"
exit 1
fi
echo "✓ Relay is running"
echo ""

# Check if nak is installed
if ! command -v nak &> /dev/null; then
echo "ERROR: 'nak' command not found. Please install nak:"
echo " go install github.com/fiatjaf/nak@latest"
exit 1
fi

# Check current event count in database
DB_FILE=$(ls build/*.db 2>/dev/null | head -1)
if [ -n "$DB_FILE" ]; then
CURRENT_COUNT=$(sqlite3 "$DB_FILE" "SELECT COUNT(*) FROM events WHERE kind=1;" 2>/dev/null || echo "0")
echo "Current kind 1 events in database: $CURRENT_COUNT"

if [ "$CURRENT_COUNT" -ge "$NUM_EVENTS" ]; then
echo "✓ Database already has $CURRENT_COUNT events (>= $NUM_EVENTS required)"
echo " Skipping event posting..."
echo ""
else
EVENTS_TO_POST=$((NUM_EVENTS - CURRENT_COUNT))
echo "Need to post $EVENTS_TO_POST more events..."
echo ""

# Post additional events
echo "Posting $EVENTS_TO_POST test events using nak..."
for i in $(seq 1 $EVENTS_TO_POST); do
# Cycle through secret keys
KEY_INDEX=$(( (i - 1) % ${#SECRET_KEYS[@]} ))
CURRENT_KEY=${SECRET_KEYS[$KEY_INDEX]}

# Create content
CONTENT="Bulk test event $i/$EVENTS_TO_POST for performance testing"

# Post event using nak (properly signed)
nak event -c "$CONTENT" --sec "$CURRENT_KEY" "$RELAY_URL" >/dev/null 2>&1

# Progress indicator
if [ $((i % 50)) -eq 0 ]; then
echo " Posted $i/$EVENTS_TO_POST events..."
fi
done
echo "✓ Posted $EVENTS_TO_POST test events"
echo ""
fi
else
echo "WARNING: Could not find database file"
echo "Posting $NUM_EVENTS events anyway..."
echo ""

# Post events
echo "Posting $NUM_EVENTS test events using nak..."
for i in $(seq 1 $NUM_EVENTS); do
KEY_INDEX=$(( (i - 1) % ${#SECRET_KEYS[@]} ))
CURRENT_KEY=${SECRET_KEYS[$KEY_INDEX]}
CONTENT="Bulk test event $i/$NUM_EVENTS for performance testing"
nak event -c "$CONTENT" --sec "$CURRENT_KEY" "$RELAY_URL" >/dev/null 2>&1

if [ $((i % 50)) -eq 0 ]; then
echo " Posted $i/$NUM_EVENTS events..."
fi
done
echo "✓ Posted $NUM_EVENTS test events"
echo ""
fi

# Wait for events to be stored
echo "Waiting 2 seconds for events to be stored..."
sleep 2
echo ""

# Test 1: Retrieve 500 events using nak req
echo "=========================================="
echo "TEST 1: Retrieve $NUM_EVENTS events"
echo "=========================================="
echo "Sending REQ with limit=$NUM_EVENTS..."
echo ""

START_TIME=$(date +%s%N)

# Use nak req to retrieve events (properly handles subscription protocol)
RESPONSE=$(nak req -k 1 -l $NUM_EVENTS "$RELAY_URL" 2>/dev/null)

END_TIME=$(date +%s%N)
ELAPSED_MS=$(( (END_TIME - START_TIME) / 1000000 ))

# Count events received (each line is one event)
EVENT_COUNT=$(echo "$RESPONSE" | grep -c '^{')

echo "Results:"
echo " Time elapsed: ${ELAPSED_MS}ms"
echo " Events received: $EVENT_COUNT"
echo ""

if [ $EVENT_COUNT -ge $((NUM_EVENTS - 10)) ]; then
echo "✓ TEST 1 PASSED: Retrieved $EVENT_COUNT events in ${ELAPSED_MS}ms"
if [ $ELAPSED_MS -lt 100 ]; then
echo " ⚡ EXCELLENT: <100ms for $EVENT_COUNT events!"
elif [ $ELAPSED_MS -lt 500 ]; then
echo " ✓ GOOD: <500ms for $EVENT_COUNT events"
elif [ $ELAPSED_MS -lt 2000 ]; then
echo " ⚠ ACCEPTABLE: <2s for $EVENT_COUNT events"
else
echo " ⚠ SLOW: ${ELAPSED_MS}ms for $EVENT_COUNT events (expected <100ms)"
fi
else
echo "✗ TEST 1 FAILED: Only retrieved $EVENT_COUNT events (expected ~$NUM_EVENTS)"
fi
echo ""

# Test 2: Retrieve events by author (use first test key's pubkey)
echo "=========================================="
echo "TEST 2: Retrieve events by author"
echo "=========================================="
echo "Sending REQ with authors filter..."
echo ""

# Get pubkey from first secret key
TEST_PUBKEY=$(nak key public ${SECRET_KEYS[0]})

START_TIME=$(date +%s%N)

RESPONSE=$(nak req -k 1 -a "$TEST_PUBKEY" -l $NUM_EVENTS "$RELAY_URL" 2>/dev/null)

END_TIME=$(date +%s%N)
ELAPSED_MS=$(( (END_TIME - START_TIME) / 1000000 ))

EVENT_COUNT=$(echo "$RESPONSE" | grep -c '^{')

echo "Results:"
echo " Time elapsed: ${ELAPSED_MS}ms"
echo " Events received: $EVENT_COUNT"
echo " (Note: Only events from first test key, ~1/3 of total)"
echo ""

if [ $EVENT_COUNT -ge $((NUM_EVENTS / 3 - 20)) ]; then
echo "✓ TEST 2 PASSED: Retrieved $EVENT_COUNT events in ${ELAPSED_MS}ms"
else
echo "⚠ TEST 2 WARNING: Only retrieved $EVENT_COUNT events (expected ~$((NUM_EVENTS / 3)))"
fi
echo ""

# Test 3: Retrieve events with time filter
echo "=========================================="
echo "TEST 3: Retrieve events with time filter"
echo "=========================================="
echo "Sending REQ with since filter (last hour)..."
echo ""

SINCE_TIME=$(($(date +%s) - 3600))

START_TIME=$(date +%s%N)

RESPONSE=$(nak req -k 1 --since "$SINCE_TIME" -l $NUM_EVENTS "$RELAY_URL" 2>/dev/null)

END_TIME=$(date +%s%N)
ELAPSED_MS=$(( (END_TIME - START_TIME) / 1000000 ))

EVENT_COUNT=$(echo "$RESPONSE" | grep -c '^{')

echo "Results:"
echo " Time elapsed: ${ELAPSED_MS}ms"
echo " Events received: $EVENT_COUNT"
echo ""

if [ $EVENT_COUNT -ge $((NUM_EVENTS - 10)) ]; then
echo "✓ TEST 3 PASSED: Retrieved $EVENT_COUNT events in ${ELAPSED_MS}ms"
else
echo "⚠ TEST 3 WARNING: Only retrieved $EVENT_COUNT events (expected ~$NUM_EVENTS)"
fi
echo ""

# Test 4: Multiple small retrievals (simulating real-world usage)
echo "=========================================="
echo "TEST 4: Multiple small retrievals (50 events × 10 times)"
echo "=========================================="
echo "Simulating real-world client behavior..."
echo ""

TOTAL_TIME=0
TOTAL_EVENTS=0
for i in $(seq 1 10); do
START_TIME=$(date +%s%N)

RESPONSE=$(nak req -k 1 -l 50 "$RELAY_URL" 2>/dev/null)

END_TIME=$(date +%s%N)
ELAPSED_MS=$(( (END_TIME - START_TIME) / 1000000 ))
TOTAL_TIME=$((TOTAL_TIME + ELAPSED_MS))

EVENT_COUNT=$(echo "$RESPONSE" | grep -c '^{')
TOTAL_EVENTS=$((TOTAL_EVENTS + EVENT_COUNT))
echo " Request $i: ${ELAPSED_MS}ms ($EVENT_COUNT events)"
done

AVG_TIME=$((TOTAL_TIME / 10))

echo ""
echo "Results:"
echo " Total time: ${TOTAL_TIME}ms"
echo " Total events: $TOTAL_EVENTS"
echo " Average time per request: ${AVG_TIME}ms"
echo ""

if [ $AVG_TIME -lt 50 ]; then
echo "✓ TEST 4 PASSED: Average retrieval time ${AVG_TIME}ms (excellent)"
elif [ $AVG_TIME -lt 200 ]; then
echo "✓ TEST 4 PASSED: Average retrieval time ${AVG_TIME}ms (good)"
else
echo "⚠ TEST 4 WARNING: Average retrieval time ${AVG_TIME}ms (slow)"
fi
echo ""

# Performance Summary
echo "=========================================="
echo "PERFORMANCE SUMMARY"
echo "=========================================="
echo ""
echo "Expected performance with event_json optimization:"
echo " - 366 events: <10ms (previously 18 seconds)"
echo " - 500 events: <15ms"
echo " - Per-event overhead: ~0.02ms (vs 50ms before)"
echo ""

if [ -n "$DB_FILE" ]; then
FINAL_COUNT=$(sqlite3 "$DB_FILE" "SELECT COUNT(*) FROM events WHERE kind=1;" 2>/dev/null || echo "0")
echo "Final database stats:"
echo " Total kind 1 events: $FINAL_COUNT"
echo " Database file: $DB_FILE"
echo ""
fi

echo "Check relay logs for [QUERY] entries to see actual query times:"
echo " journalctl -u c-relay -n 100 | grep QUERY"
echo ""

echo "=========================================="
echo "Test Complete"
echo "=========================================="