trading_bot_v4/scripts/diagnostic_sweep.py
mindesbunister cc56b72df2 fix: Database-first cluster status detection + Stop button clarification
CRITICAL FIX (Nov 30, 2025):
- Dashboard showed 'idle' despite 22+ worker processes running
- Root cause: SSH-based worker detection timing out
- Solution: Check database for running chunks FIRST

Changes:
1. app/api/cluster/status/route.ts:
   - Query exploration database before SSH detection
   - If running chunks exist, mark workers 'active' even if SSH fails
   - Override worker status: 'offline' → 'active' when chunks running
   - Log: ' Cluster status: ACTIVE (database shows running chunks)'
   - Database is source of truth, SSH only for supplementary metrics

2. app/cluster/page.tsx:
   - Stop button ALREADY EXISTS (conditionally shown)
   - Shows Start when status='idle', Stop when status='active'
   - No code changes needed - fixed by status detection

Result:
- Dashboard now shows 'ACTIVE' with 2 workers (correct)
- Workers show 'active' status (was 'offline')
- Stop button automatically visible when cluster active
- System resilient to SSH timeouts/network issues

Verified:
- Container restarted: Nov 30 21:18 UTC
- API tested: Returns status='active', activeWorkers=2
- Logs confirm: Database-first logic working
- Workers confirmed running: 22+ processes on worker1, workers on worker2
2025-11-30 22:23:01 +01:00
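
The fix boils down to an ordering rule: the exploration database is the source of truth for liveness, and SSH probes only supplement it. Below is a minimal sketch of that rule, written in Python to match the script that follows (the actual implementation is TypeScript in app/api/cluster/status/route.ts; `running_chunks` and `ssh_status` are hypothetical stand-ins for the DB query result and the per-worker SSH probe):

def resolve_cluster_status(running_chunks: int, ssh_status: dict) -> tuple:
    """Database-first status: chunks marked 'running' are authoritative."""
    if running_chunks > 0:
        # SSH timeouts make workers look 'offline'; override to 'active'
        # because the database proves work is in flight.
        return "active", {worker: "active" for worker in ssh_status}
    # No running chunks: fall back to whatever SSH reported.
    return ("active" if "active" in ssh_status.values() else "idle"), ssh_status

For example, resolve_cluster_status(5, {"worker1": "offline", "worker2": "offline"}) returns ("active", ...) with both workers promoted to 'active', which is the Nov 30 failure mode the commit describes.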

#!/usr/bin/env python3
"""
Diagnostic Parameter Sweep - Verify Parameters Actually Control Behavior
Purpose: Determine if parameter insensitivity is a bug or reality
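
Usage (the CSV path here is illustrative):
    python scripts/diagnostic_sweep.py --csv data/SOL-PERP_5m.csv --symbol SOL-PERP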
"""
from __future__ import annotations
import argparse
import sys
from pathlib import Path
from typing import Dict, List, Tuple
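
# Ensure the repository root is importable when this script is run directly,
# so the local `backtester` package resolves without installation.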
PROJECT_ROOT = Path(__file__).resolve().parents[1]
if str(PROJECT_ROOT) not in sys.path:
    sys.path.append(str(PROJECT_ROOT))

from tqdm import tqdm

from backtester.data_loader import load_csv
from backtester.indicators.money_line import MoneyLineInputs
from backtester.simulator import TradeConfig, simulate_money_line


def test_parameter_impact(csv_path: Path, symbol: str) -> Dict[str, List[Tuple[float, int, float]]]:
    """
    Test whether each parameter actually changes behavior.

    Returns: {param_name: [(param_value, num_signals, total_pnl), ...]}
    """
    print("Loading data...")
    data_slice = load_csv(Path(csv_path), symbol, "5m")
    df = data_slice.data
    print(f"Loaded {len(df)} bars")
    config = TradeConfig(position_size=8100.0, max_bars_per_trade=288)
    results = {}
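
    # Each test below varies one parameter at a time, holding everything else
    # at MoneyLineInputs defaults, so differences in trade count and PnL are
    # attributable to that single parameter.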
    # Test 1: flip_threshold_percent (should dramatically affect signal count)
    print("\n" + "="*60)
    print("TEST 1: flip_threshold_percent (0.4, 0.5, 0.6, 0.7)")
    print("Expected: LOWER threshold = MORE signals")
    print("="*60)
    flip_results = []
    for val in tqdm([0.4, 0.5, 0.6, 0.7], desc="Testing flip_threshold"):
        inputs = MoneyLineInputs(flip_threshold_percent=val)
        result = simulate_money_line(df, symbol, inputs, config)
        flip_results.append((val, len(result.trades), result.total_pnl))
        print(f"flip_threshold={val:.1f}: {len(result.trades)} trades, ${result.total_pnl:.2f} PnL")
    results['flip_threshold_percent'] = flip_results
    # Test 2: cooldown_bars (should affect signal frequency)
    print("\n" + "="*60)
    print("TEST 2: cooldown_bars (1, 2, 3, 4)")
    print("Expected: LOWER cooldown = MORE signals")
    print("="*60)
    cooldown_results = []
    for val in tqdm([1, 2, 3, 4], desc="Testing cooldown_bars"):
        inputs = MoneyLineInputs(cooldown_bars=val)
        result = simulate_money_line(df, symbol, inputs, config)
        cooldown_results.append((val, len(result.trades), result.total_pnl))
        print(f"cooldown_bars={val}: {len(result.trades)} trades, ${result.total_pnl:.2f} PnL")
    results['cooldown_bars'] = cooldown_results
    # Test 3: momentum_min_adx (should filter signals)
    print("\n" + "="*60)
    print("TEST 3: momentum_min_adx (18, 21, 24, 27)")
    print("Expected: HIGHER ADX = FEWER signals")
    print("="*60)
    adx_results = []
    for val in tqdm([18.0, 21.0, 24.0, 27.0], desc="Testing momentum_min_adx"):
        inputs = MoneyLineInputs(momentum_min_adx=val)
        result = simulate_money_line(df, symbol, inputs, config)
        adx_results.append((val, len(result.trades), result.total_pnl))
        print(f"momentum_min_adx={val:.1f}: {len(result.trades)} trades, ${result.total_pnl:.2f} PnL")
    results['momentum_min_adx'] = adx_results
    # Test 4: ma_gap_threshold (should affect signal generation)
    print("\n" + "="*60)
    print("TEST 4: ma_gap_threshold (0.2, 0.3, 0.4, 0.5)")
    print("Expected: Different signal counts")
    print("="*60)
    gap_results = []
    for val in tqdm([0.2, 0.3, 0.4, 0.5], desc="Testing ma_gap_threshold"):
        inputs = MoneyLineInputs(ma_gap_threshold=val)
        result = simulate_money_line(df, symbol, inputs, config)
        gap_results.append((val, len(result.trades), result.total_pnl))
        print(f"ma_gap_threshold={val:.1f}: {len(result.trades)} trades, ${result.total_pnl:.2f} PnL")
    results['ma_gap_threshold'] = gap_results

    return results


def analyze_results(results: Dict[str, List[Tuple[float, int, float]]]):
    """Determine if parameters actually affect behavior."""
    print("\n" + "="*80)
    print("DIAGNOSTIC ANALYSIS")
    print("="*80)
    for param_name, values in results.items():
        signal_counts = [x[1] for x in values]
        pnls = [x[2] for x in values]
        # Check if all identical (parameter has NO effect)
        if len(set(signal_counts)) == 1 and len(set(pnls)) == 1:
            print(f"\n🔴 {param_name}: NO EFFECT - All configs produce identical results!")
            print(f"   All {signal_counts[0]} trades, ${pnls[0]:.2f} PnL")
            print("   ⚠️ Parameter is NOT being applied or is overridden")
        else:
            # Parameter has some effect
            min_signals, max_signals = min(signal_counts), max(signal_counts)
            min_pnl, max_pnl = min(pnls), max(pnls)
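            # Spread of trade counts as a percent of the smallest count,
            # guarded so a zero-trade config cannot divide by zero.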
            range_pct = ((max_signals - min_signals) / min_signals * 100) if min_signals > 0 else 0
            print(f"\n{param_name}: HAS EFFECT")
            print(f"   Signal range: {min_signals}-{max_signals} ({range_pct:.1f}% variation)")
            print(f"   PnL range: ${min_pnl:.2f} to ${max_pnl:.2f}")


def test_extreme_configs(csv_path: Path, symbol: str):
    """Test extreme parameter combinations to verify they produce different results."""
    print("\n" + "="*80)
    print("EXTREME CONFIGURATION TEST")
    print("="*80)
    data_slice = load_csv(Path(csv_path), symbol, "5m")
    df = data_slice.data
    config = TradeConfig(position_size=8100.0, max_bars_per_trade=288)
    # Ultra-loose config (should generate MANY signals)
    loose = MoneyLineInputs(
        flip_threshold_percent=0.3,
        cooldown_bars=1,
        momentum_min_adx=15.0,
        ma_gap_threshold=0.1,
        momentum_spacing=2,
        momentum_cooldown=1,
    )
    # Ultra-strict config (should generate FEW signals)
    strict = MoneyLineInputs(
        flip_threshold_percent=0.8,
        cooldown_bars=5,
        momentum_min_adx=30.0,
        ma_gap_threshold=0.6,
        momentum_spacing=6,
        momentum_cooldown=5,
    )
    # Baseline
    baseline = MoneyLineInputs()
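
    # If the parameter system works, trade counts should fall roughly in the
    # order loose >= baseline >= strict; identical counts mean the inputs are
    # being ignored somewhere downstream.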
print("\n🔹 ULTRA-LOOSE config:")
loose_result = simulate_money_line(df, symbol, loose, config)
print(f" Trades: {len(loose_result.trades)}, PnL: ${loose_result.total_pnl:.2f}")
print("\n🔹 BASELINE config:")
baseline_result = simulate_money_line(df, symbol, baseline, config)
print(f" Trades: {len(baseline_result.trades)}, PnL: ${baseline_result.total_pnl:.2f}")
print("\n🔹 ULTRA-STRICT config:")
strict_result = simulate_money_line(df, symbol, strict, config)
print(f" Trades: {len(strict_result.trades)}, PnL: ${strict_result.total_pnl:.2f}")
if len(loose_result.trades) == len(baseline_result.trades) == len(strict_result.trades):
print("\n🔴 CRITICAL BUG: All three configs produce IDENTICAL trade counts!")
print(" Parameter system is completely broken.")
return False
else:
print("\n✅ Configs produce different results - parameter system works")
print(f" Variation: {len(strict_result.trades)} to {len(loose_result.trades)} trades")
return True


def main():
    parser = argparse.ArgumentParser(description="Diagnostic parameter testing")
    parser.add_argument("--csv", type=Path, required=True, help="Path to OHLCV CSV")
    parser.add_argument("--symbol", default="SOL-PERP", help="Symbol name")
    args = parser.parse_args()
    print("="*80)
    print("V9 PARAMETER DIAGNOSTIC SUITE")
    print("="*80)
    print(f"Data: {args.csv}")
    print(f"Symbol: {args.symbol}")
    # Test 1: Individual parameter impact
    results = test_parameter_impact(args.csv, args.symbol)
    analyze_results(results)
    # Test 2: Extreme configs
    system_works = test_extreme_configs(args.csv, args.symbol)
print("\n" + "="*80)
print("CONCLUSION")
print("="*80)
if system_works:
print("✅ Parameter system is functional")
print(" The parameter insensitivity in the sweep may indicate:")
print(" 1. The sweep grid didn't explore extreme enough values")
print(" 2. Parameters have non-linear interactions")
print(" 3. Core edge comes from EMA logic, not parameter tuning")
else:
print("🔴 Parameter system is BROKEN - parameters don't affect behavior")
print(" This explains why the sweep found 100+ identical results")
print(" Fix required before optimization can proceed")
print("\n" + "="*80)


if __name__ == "__main__":
    main()