trading_bot_v4/scripts/sensitivity_analysis.py
mindesbunister cc56b72df2 fix: Database-first cluster status detection + Stop button clarification
CRITICAL FIX (Nov 30, 2025):
- Dashboard showed 'idle' despite 22+ worker processes running
- Root cause: SSH-based worker detection timing out
- Solution: Check database for running chunks FIRST

Changes:
1. app/api/cluster/status/route.ts:
   - Query exploration database before SSH detection
   - If running chunks exist, mark workers 'active' even if SSH fails
   - Override worker status: 'offline' → 'active' when chunks running
   - Log: 'Cluster status: ACTIVE (database shows running chunks)'
   - Database is the source of truth; SSH is used only for supplementary metrics (see the sketch after this list)

2. app/cluster/page.tsx:
   - Stop button ALREADY EXISTS (conditionally shown)
   - Shows Start when status='idle', Stop when status='active'
   - No code changes needed - fixed by status detection
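
The gist of the database-first ordering in item 1, as a minimal TypeScript sketch. This is not the actual route.ts contents; countRunningChunks, probeWorkerOverSsh and WORKER_HOSTS are hypothetical stand-ins for the project's real database query, SSH probe and worker configuration:

// Sketch only: database first, SSH demoted to a supplementary signal.
export async function GET() {
  const runningChunks = await countRunningChunks();  // hypothetical: count chunks marked 'running' in the exploration DB

  const workers = await Promise.all(
    WORKER_HOSTS.map(async (host) => {
      // SSH may time out; a failed probe must not flip the cluster to 'idle'.
      const metrics = await probeWorkerOverSsh(host).catch(() => null);
      const sshStatus = metrics ? 'active' : 'offline';
      return {
        host,
        metrics,
        // Override 'offline' -> 'active' while the database shows running chunks.
        status: runningChunks > 0 ? 'active' : sshStatus,
      };
    })
  );

  return Response.json({
    status: runningChunks > 0 ? 'active' : 'idle',
    activeWorkers: workers.filter((w) => w.status === 'active').length,
    workers,
  });
}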

Result:
- Dashboard now shows 'ACTIVE' with 2 workers (correct)
- Workers show 'active' status (was 'offline')
- Stop button automatically visible when cluster active
- System resilient to SSH timeouts/network issues

Verified:
- Container restarted: Nov 30 21:18 UTC
- API tested: Returns status='active', activeWorkers=2
- Logs confirm: Database-first logic working
- Workers confirmed running: 22+ processes on worker1, workers on worker2
2025-11-30 22:23:01 +01:00

217 lines
7.4 KiB
Python
Executable File

#!/usr/bin/env python3
"""
Sensitivity Analysis - Single Parameter Impact Testing
Purpose: Measure exact impact of each parameter in isolation
"""
from __future__ import annotations
import argparse
import sys
from pathlib import Path
from typing import Dict, List, Tuple
PROJECT_ROOT = Path(__file__).resolve().parents[1]
if str(PROJECT_ROOT) not in sys.path:
    sys.path.append(str(PROJECT_ROOT))
import pandas as pd
from tqdm import tqdm
from backtester.data_loader import load_csv
from backtester.indicators.money_line import MoneyLineInputs
from backtester.simulator import TradeConfig, simulate_money_line
def test_single_parameter_sensitivity(
    csv_path: Path,
    symbol: str,
    param_name: str,
    values: List[float],
) -> List[Tuple[float, int, float, float, float]]:
    """
    Vary one parameter while holding others at baseline.
    Returns: [(value, trades, pnl, win_rate, profit_factor), ...]
    """
    data_slice = load_csv(Path(csv_path), symbol, "5m")
    df = data_slice.data
    config = TradeConfig(position_size=8100.0, max_bars_per_trade=288)
    results = []
    baseline = MoneyLineInputs()

    print(f"\n{'='*80}")
    print(f"TESTING: {param_name}")
    print(f"Values: {values}")
    print(f"{'='*80}")

    for val in tqdm(values, desc=f"Testing {param_name}", leave=False):
        # Create inputs with single parameter changed
        kwargs = {param_name: val}
        inputs = MoneyLineInputs(**kwargs)
        result = simulate_money_line(df, symbol, inputs, config)
        results.append((
            val,
            len(result.trades),
            result.total_pnl,
            result.win_rate,
            result.profit_factor if result.profit_factor != float('inf') else 999.0,
        ))
        print(f"{param_name}={val:6.2f}: "
              f"{len(result.trades):3d} trades | "
              f"${result.total_pnl:8.2f} PnL | "
              f"{result.win_rate:5.1f}% WR | "
              f"PF={result.profit_factor:.3f}")

    return results
def analyze_sensitivity(
    param_name: str,
    results: List[Tuple[float, int, float, float, float]],
):
    """Analyze parameter sensitivity results."""
    values = [r[0] for r in results]
    trades = [r[1] for r in results]
    pnls = [r[2] for r in results]
    win_rates = [r[3] for r in results]
    profit_factors = [r[4] for r in results]

    # Find best by different metrics
    best_pnl_idx = pnls.index(max(pnls))
    best_wr_idx = win_rates.index(max(win_rates))
    best_pf_idx = profit_factors.index(max(profit_factors))

    print(f"\n{'='*80}")
    print(f"SENSITIVITY ANALYSIS: {param_name}")
    print(f"{'='*80}")

    # Trade count variation
    min_trades, max_trades = min(trades), max(trades)
    if max_trades > 0:
        trade_variation = (max_trades - min_trades) / max_trades * 100
        print(f"Trade Count Range: {min_trades}-{max_trades} ({trade_variation:.1f}% variation)")

    # PnL variation
    min_pnl, max_pnl = min(pnls), max(pnls)
    pnl_range = max_pnl - min_pnl
    print(f"PnL Range: ${min_pnl:.2f} to ${max_pnl:.2f} (${pnl_range:.2f} swing)")

    # Best values
    print(f"\nBest PnL: {values[best_pnl_idx]:.2f} → ${pnls[best_pnl_idx]:.2f}")
print(f"Best WR: {values[best_wr_idx]:.2f}{win_rates[best_wr_idx]:.1f}%")
print(f"Best PF: {values[best_pf_idx]:.2f}{profit_factors[best_pf_idx]:.3f}")
    # Sensitivity classification
    if pnl_range < 50:
        print(f"\n💡 {param_name}: LOW SENSITIVITY")
        print(f"   PnL swing only ${pnl_range:.2f} - parameter has minimal impact")
    elif pnl_range < 200:
        print(f"\n💡 {param_name}: MODERATE SENSITIVITY")
        print(f"   PnL swing ${pnl_range:.2f} - worth tuning but not critical")
    else:
        print(f"\n💡 {param_name}: HIGH SENSITIVITY")
        print(f"   PnL swing ${pnl_range:.2f} - CRITICAL parameter to optimize")
def run_full_sensitivity_suite(csv_path: Path, symbol: str):
    """Test all parameters for sensitivity."""
    test_configs = [
        ("flip_threshold_percent", [0.3, 0.4, 0.5, 0.6, 0.7, 0.8]),
        ("ma_gap_threshold", [0.15, 0.25, 0.35, 0.45, 0.55, 0.65]),
        ("momentum_min_adx", [15.0, 18.0, 21.0, 24.0, 27.0, 30.0]),
        ("momentum_long_max_pos", [60.0, 65.0, 70.0, 75.0, 80.0]),
        ("momentum_short_min_pos", [15.0, 20.0, 25.0, 30.0, 35.0]),
        ("cooldown_bars", [1, 2, 3, 4, 5]),
        ("momentum_spacing", [2, 3, 4, 5, 6]),
        ("momentum_cooldown", [1, 2, 3, 4, 5]),
    ]
    all_results = {}

    print(f"\n{'='*80}")
    print("RUNNING FULL SENSITIVITY SUITE")
    print(f"{'='*80}")
    print(f"Testing {len(test_configs)} parameters")
    print()

    for param_name, values in tqdm(test_configs, desc="Overall progress"):
        results = test_single_parameter_sensitivity(csv_path, symbol, param_name, values)
        all_results[param_name] = results
        analyze_sensitivity(param_name, results)

    # Summary ranking
    print(f"\n{'='*80}")
    print("PARAMETER SENSITIVITY RANKING")
    print(f"{'='*80}")

    # Calculate PnL ranges for ranking
    rankings = []
    for param_name, results in all_results.items():
        pnls = [r[2] for r in results]
        pnl_range = max(pnls) - min(pnls)
        rankings.append((param_name, pnl_range))
    rankings.sort(key=lambda x: x[1], reverse=True)

    print("\nOptimization Priority (by PnL impact):")
    for i, (param_name, pnl_range) in enumerate(rankings, 1):
        if pnl_range > 200:
            impact = "🔴 CRITICAL"
        elif pnl_range > 100:
            impact = "🟡 HIGH"
        elif pnl_range > 50:
            impact = "🟢 MODERATE"
        else:
            impact = "⚪ LOW"
        print(f"{i}. {param_name:30s}: ${pnl_range:8.2f} swing - {impact}")
def main():
    parser = argparse.ArgumentParser(description="Single-parameter sensitivity analysis")
    parser.add_argument("--csv", type=Path, required=True, help="Path to OHLCV CSV")
    parser.add_argument("--symbol", default="SOL-PERP", help="Symbol name")
    parser.add_argument("--param", help="Test specific parameter only")
    args = parser.parse_args()

    print("="*80)
    print("V9 SENSITIVITY ANALYSIS")
    print("="*80)
    print(f"Data: {args.csv}")
    print(f"Symbol: {args.symbol}")

    if args.param:
        # Test single parameter
        test_values = {
            "flip_threshold_percent": [0.3, 0.4, 0.5, 0.6, 0.7, 0.8],
            "ma_gap_threshold": [0.15, 0.25, 0.35, 0.45, 0.55, 0.65],
            "momentum_min_adx": [15.0, 18.0, 21.0, 24.0, 27.0, 30.0],
            "momentum_long_max_pos": [60.0, 65.0, 70.0, 75.0, 80.0],
            "momentum_short_min_pos": [15.0, 20.0, 25.0, 30.0, 35.0],
            "cooldown_bars": [1, 2, 3, 4, 5],
            "momentum_spacing": [2, 3, 4, 5, 6],
            "momentum_cooldown": [1, 2, 3, 4, 5],
        }
        if args.param not in test_values:
            print(f"❌ Unknown parameter: {args.param}")
            print(f"Available: {', '.join(test_values.keys())}")
            return
        results = test_single_parameter_sensitivity(
            args.csv, args.symbol, args.param, test_values[args.param]
        )
        analyze_sensitivity(args.param, results)
    else:
        # Test all parameters
        run_full_sensitivity_suite(args.csv, args.symbol)

    print(f"\n{'='*80}")


if __name__ == "__main__":
    main()
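
Example invocations (the CSV path below is a placeholder for an exported 5m OHLCV file; omit --param to run the full sensitivity suite):

python scripts/sensitivity_analysis.py --csv path/to/ohlcv_5m.csv --symbol SOL-PERP --param flip_threshold_percent
python scripts/sensitivity_analysis.py --csv path/to/ohlcv_5m.csv --symbol SOL-PERP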