summaryrefslogtreecommitdiff
path: root/benchmark-100mb.sh
blob: 1d3fad0d49a57a9f51fb2b54355da20c55ec53a7 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
#!/bin/bash
# Benchmark script: Generate and ingest 100MB of historic metrics
# This tests Epimetheus performance with large-scale data ingestion

set -e

# Optimize Go GC for better performance (Phase 3 optimization)
export GOGC=200         # Reduce GC frequency (default 100)
export GOMEMLIMIT=3GiB  # Set memory limit for Go 1.19+

BENCHMARK_DIR="benchmark-results"
TIMESTAMP=$(date +%Y%m%d-%H%M%S)
RESULT_FILE="$BENCHMARK_DIR/benchmark-$TIMESTAMP.log"

mkdir -p "$BENCHMARK_DIR"

# Emit the banner through a single tee invocation rather than one per line;
# the first write truncates/creates the log, everything later appends.
{
  echo "=== Epimetheus 100MB Benchmark ==="
  echo ""
  echo "Timestamp: $(date)"
  echo ""
} | tee "$RESULT_FILE"

# Step 1: Generate 100MB of test data
echo "Step 1: Generating 100MB of test data..." | tee -a "$RESULT_FILE"
echo "" | tee -a "$RESULT_FILE"

# Sizing: each CSV line is roughly 70 bytes, so 100MB needs ~1.5M lines.
TARGET_SIZE_MB=100
BYTES_PER_LINE=70
TARGET_BYTES=$((TARGET_SIZE_MB * 1024 * 1024))
TARGET_LINES=$((TARGET_BYTES / BYTES_PER_LINE))

{
  echo "Target size: ${TARGET_SIZE_MB}MB"
  echo "Estimated lines needed: $TARGET_LINES"
  echo ""
} | tee -a "$RESULT_FILE"

# Time window: 7 days of 1-minute samples ending one hour in the past, so
# every sample is historic (> 5 minutes old) and goes via Remote Write only.
GENERATION_START=$(date +%s)

NOW=$(date +%s)000                            # current time in milliseconds
ONE_HOUR_AGO=$((NOW - 3600000))               # end of window: 1 hour ago
SEVEN_DAYS_AGO=$((ONE_HOUR_AGO - 604800000))  # start of window: 7 days before that

# CSV header: two literal comment lines (no expansion wanted or needed).
printf '%s\n' \
  '# Prometheus metrics - 100MB benchmark dataset' \
  '# Format: metric_name,labels,value,timestamp_ms' \
  > benchmark-data-100mb.csv

# Generate metrics
# ~150 unique time series (10 metrics x 15 instances), each with ~10,000
# data points, gives roughly 1.5M samples total.
METRIC_PREFIX="epimetheus_benchmark"
METRICS=(
  "${METRIC_PREFIX}_cpu_usage"
  "${METRIC_PREFIX}_memory_bytes"
  "${METRIC_PREFIX}_disk_io_bytes"
  "${METRIC_PREFIX}_network_rx_bytes"
  "${METRIC_PREFIX}_network_tx_bytes"
  "${METRIC_PREFIX}_requests_total"
  "${METRIC_PREFIX}_errors_total"
  "${METRIC_PREFIX}_response_time_ms"
  "${METRIC_PREFIX}_active_connections"
  "${METRIC_PREFIX}_queue_depth"
)

# Simulated fleet: 5 web, 5 api, 3 db and 2 worker hosts (zero-padded names).
INSTANCES=(web-{01..05} api-{01..05} db-{01..03} worker-{01..02})

INTERVAL_MS=60000      # 1-minute sample interval
TOTAL_INTERVALS=10080  # 7 days worth of 1-minute intervals

echo "Generating data..." | tee -a "$RESULT_FILE"
LINES_GENERATED=0

# Fix: open the output file ONCE for the whole loop instead of re-opening it
# with '>>' for every single sample (~1.5M open/close cycles), which dominated
# generation time. Sample lines go to stdout (redirected below); the progress
# indicator goes to stderr so its \r control characters never land in the CSV
# or in the log file.
for ((i=0; i<TOTAL_INTERVALS; i++)); do
  # Dedicated name so we don't clobber the global TIMESTAMP that named the
  # result file (the original reused TIMESTAMP here).
  SAMPLE_TS=$((SEVEN_DAYS_AGO + (i * INTERVAL_MS)))

  # One sample per metric x instance combination at this timestamp.
  for METRIC in "${METRICS[@]}"; do
    for INSTANCE in "${INSTANCES[@]}"; do
      VALUE=$((RANDOM % 1000))
      echo "$METRIC,instance=$INSTANCE;env=benchmark,$VALUE,$SAMPLE_TS"
      LINES_GENERATED=$((LINES_GENERATED + 1))
    done
  done

  # Progress indicator every 1000 intervals (terminal only, not the log).
  if [ $((i % 1000)) -eq 0 ]; then
    PROGRESS=$((i * 100 / TOTAL_INTERVALS))
    printf '\rProgress: %d%% (%d lines)' "$PROGRESS" "$LINES_GENERATED" >&2
  fi
done >> benchmark-data-100mb.csv
printf '\n' >&2

echo "" | tee -a "$RESULT_FILE"

GENERATION_END=$(date +%s)
GENERATION_TIME=$((GENERATION_END - GENERATION_START))

# Get actual file size: BSD stat first, then GNU stat. Default to 0 so the
# arithmetic below cannot blow up (under 'set -e') on an empty value.
FILE_SIZE=$(stat -f%z benchmark-data-100mb.csv 2>/dev/null || stat -c%s benchmark-data-100mb.csv 2>/dev/null || true)
FILE_SIZE=${FILE_SIZE:-0}
FILE_SIZE_MB=$((FILE_SIZE / 1024 / 1024))

{
  echo ""
  echo "Data generation complete:"
  echo "  Lines generated: $LINES_GENERATED"
  echo "  File size: ${FILE_SIZE_MB}MB ($FILE_SIZE bytes)"
  echo "  Generation time: ${GENERATION_TIME}s"
  echo ""
} | tee -a "$RESULT_FILE"

# Step 2: Start port-forward to Prometheus
echo "Step 2: Setting up port-forward to Prometheus..." | tee -a "$RESULT_FILE"
kubectl port-forward -n monitoring svc/prometheus-kube-prometheus-prometheus 9090:9090 > /tmp/benchmark-pf.log 2>&1 &
PF_PID=$!
# Fix: under 'set -e' any early failure used to leave the port-forward
# running forever. Kill it on EVERY exit path; the explicit kill in the
# cleanup step is then just a harmless no-op.
trap 'kill "$PF_PID" 2>/dev/null || true' EXIT
echo "Port-forward started (PID: $PF_PID)" | tee -a "$RESULT_FILE"

# Fix: poll the tunnel until Prometheus answers (bounded at ~8s, matching the
# old fixed 'sleep 8') instead of hoping 8 seconds is always enough.
for _ in 1 2 3 4 5 6 7 8; do
  if curl -sf -o /dev/null http://localhost:9090/-/ready 2>/dev/null; then
    break
  fi
  sleep 1
done
echo "" | tee -a "$RESULT_FILE"

# Step 3: Get baseline Prometheus metrics
echo "Step 3: Collecting baseline Prometheus metrics..." | tee -a "$RESULT_FILE"
PROM_POD=$(kubectl get pod -n monitoring -l app.kubernetes.io/name=prometheus -o jsonpath='{.items[0].metadata.name}')
echo "Prometheus pod: $PROM_POD" | tee -a "$RESULT_FILE"

# Fix: snapshot 'kubectl top' ONCE so CPU and memory come from the same
# sample, and don't abort the whole benchmark (set -e) if metrics-server
# is unavailable.
BASELINE_TOP=$(kubectl top pod -n monitoring "$PROM_POD" --no-headers || true)
BASELINE_CPU=$(awk '{print $2}' <<<"$BASELINE_TOP")
BASELINE_MEMORY=$(awk '{print $3}' <<<"$BASELINE_TOP")

echo "  Baseline memory: $BASELINE_MEMORY" | tee -a "$RESULT_FILE"
echo "  Baseline CPU: $BASELINE_CPU" | tee -a "$RESULT_FILE"
echo "" | tee -a "$RESULT_FILE"

# Step 4: Run ingestion benchmark
echo "Step 4: Running ingestion benchmark..." | tee -a "$RESULT_FILE"
echo "" | tee -a "$RESULT_FILE"

# High-resolution start time. Fix: macOS date(1) has no %N and would emit a
# literal 'N', breaking the arithmetic below — fall back to whole seconds
# (this script already supports BSD via the stat fallback, so stay consistent).
INGEST_START=$(date +%s.%N)
case "$INGEST_START" in *N*) INGEST_START=$(date +%s) ;; esac

# Run epimetheus with time measurement
# Use CSV mode with Remote Write API (all data is historic)
# Note: We can't use auto mode because it requires both Pushgateway and Remote Write
# Instead, we'll implement a direct CSV->Remote Write ingestion

echo "Parsing CSV and preparing for Remote Write ingestion..." | tee -a "$RESULT_FILE"

# For now, use backfill mode to process the CSV data
# We'll need to enhance epimetheus to support pure CSV->RemoteWrite mode
echo "WARNING: Using auto mode - this may fail if data is too recent" | tee -a "$RESULT_FILE"
echo "Continuing with Remote Write API for historic data..." | tee -a "$RESULT_FILE"

# Fix: '/usr/bin/time -v' is GNU-only; BSD/macOS time uses -l. Probe once and
# pick whichever flag this system understands.
if /usr/bin/time -v true >/dev/null 2>&1; then
  TIME_FLAG=-v
else
  TIME_FLAG=-l
fi

/usr/bin/time "$TIME_FLAG" ./epimetheus \
  -mode=auto \
  -file=benchmark-data-100mb.csv \
  -format=csv \
  -prometheus=http://localhost:9090/api/v1/write \
  -pushgateway=http://localhost:9091 \
  2>&1 | tee -a "$RESULT_FILE" || true  # Continue even if pushgateway fails

INGEST_END=$(date +%s.%N)
case "$INGEST_END" in *N*) INGEST_END=$(date +%s) ;; esac

# Fix: use awk instead of bc — awk is already a dependency of this script,
# while bc is not guaranteed to be installed.
INGEST_TIME=$(awk -v s="$INGEST_START" -v e="$INGEST_END" 'BEGIN{printf "%.2f", e - s}')

echo "" | tee -a "$RESULT_FILE"
echo "Ingestion complete:" | tee -a "$RESULT_FILE"
echo "  Total time: ${INGEST_TIME}s" | tee -a "$RESULT_FILE"

# Throughput; guard against a zero duration so we never divide by zero.
SAMPLES_PER_SECOND=$(awk -v n="$LINES_GENERATED" -v t="$INGEST_TIME" 'BEGIN{printf "%.2f", (t > 0) ? n / t : 0}')
MB_PER_SECOND=$(awk -v m="$FILE_SIZE_MB" -v t="$INGEST_TIME" 'BEGIN{printf "%.2f", (t > 0) ? m / t : 0}')

echo "  Samples/second: $SAMPLES_PER_SECOND" | tee -a "$RESULT_FILE"
echo "  MB/second: $MB_PER_SECOND" | tee -a "$RESULT_FILE"
echo "" | tee -a "$RESULT_FILE"

# Step 5: Get post-ingestion Prometheus metrics
echo "Step 5: Collecting post-ingestion Prometheus metrics..." | tee -a "$RESULT_FILE"
sleep 5  # Wait for metrics to stabilize

# Fix: snapshot 'kubectl top' once (consistent CPU/memory pair) and don't
# let a missing metrics-server abort the script under 'set -e'.
POST_TOP=$(kubectl top pod -n monitoring "$PROM_POD" --no-headers || true)
POST_CPU=$(awk '{print $2}' <<<"$POST_TOP")
POST_MEMORY=$(awk '{print $3}' <<<"$POST_TOP")

echo "  Post-ingestion memory: $POST_MEMORY" | tee -a "$RESULT_FILE"
echo "  Post-ingestion CPU: $POST_CPU" | tee -a "$RESULT_FILE"
echo "" | tee -a "$RESULT_FILE"

# Step 6: Query some data to verify ingestion
echo "Step 6: Verifying data ingestion..." | tee -a "$RESULT_FILE"
# Fix: if curl/jq fail here (e.g. jq not installed -> exit 127), 'set -e'
# used to kill the script BEFORE cleanup and the summary. Degrade gracefully.
QUERY_RESULT=$(curl -s "http://localhost:9090/api/v1/query?query=count(epimetheus_benchmark_cpu_usage)" | jq -r '.data.result[0].value[1]' 2>/dev/null || echo "unknown")
echo "  Samples found for epimetheus_benchmark_cpu_usage: $QUERY_RESULT" | tee -a "$RESULT_FILE"
echo "" | tee -a "$RESULT_FILE"

# Step 7: Cleanup
echo "Step 7: Cleaning up..." | tee -a "$RESULT_FILE"
kill "$PF_PID" 2>/dev/null || true
echo "" | tee -a "$RESULT_FILE"

# Summary: emit the whole report through one tee invocation instead of one
# per line; the final two hints go to the terminal only, as before.
{
  echo "=== BENCHMARK SUMMARY ==="
  echo ""
  echo "Dataset:"
  echo "  Size: ${FILE_SIZE_MB}MB"
  echo "  Samples: $LINES_GENERATED"
  echo "  Time range: 7 days"
  echo "  Interval: 1 minute"
  echo ""
  echo "Performance:"
  echo "  Generation time: ${GENERATION_TIME}s"
  echo "  Ingestion time: ${INGEST_TIME}s"
  echo "  Throughput: $SAMPLES_PER_SECOND samples/s"
  echo "  Throughput: $MB_PER_SECOND MB/s"
  echo ""
  echo "Resources:"
  echo "  Memory: $BASELINE_MEMORY -> $POST_MEMORY"
  echo "  CPU: $BASELINE_CPU -> $POST_CPU"
  echo ""
  echo "Results saved to: $RESULT_FILE"
  echo ""
} | tee -a "$RESULT_FILE"
echo "To view results: cat $RESULT_FILE"
echo "To analyze: less $RESULT_FILE"