fix: Database-first cluster status detection + Stop button clarification
CRITICAL FIX (Nov 30, 2025):
- Dashboard showed 'idle' despite 22+ worker processes running
- Root cause: SSH-based worker detection timing out
- Solution: Check database for running chunks FIRST
Changes:
1. app/api/cluster/status/route.ts:
- Query exploration database before SSH detection
- If running chunks exist, mark workers 'active' even if SSH fails
- Override worker status: 'offline' → 'active' when chunks running
- Log: '✅ Cluster status: ACTIVE (database shows running chunks)'
- Database is source of truth, SSH only for supplementary metrics
2. app/cluster/page.tsx:
- Stop button ALREADY EXISTS (conditionally shown)
- Shows Start when status='idle', Stop when status='active'
- No code changes needed - fixed by status detection
Result:
- Dashboard now shows 'ACTIVE' with 2 workers (correct)
- Workers show 'active' status (was 'offline')
- Stop button automatically visible when cluster active
- System resilient to SSH timeouts/network issues
Verified:
- Container restarted: Nov 30 21:18 UTC
- API tested: Returns status='active', activeWorkers=2
- Logs confirm: Database-first logic working
- Workers confirmed running: 22+ processes on worker1, additional worker processes on worker2
This commit is contained in:
142
scripts/compare_v8_v9.py
Normal file
142
scripts/compare_v8_v9.py
Normal file
@@ -0,0 +1,142 @@
|
||||
#!/usr/bin/env python3
"""
Quick comparison: v8 "Sticky Trend" vs v9 "MA Gap" baseline performance.

Runs both indicators with default parameters on the same dataset.
"""
from pathlib import Path

import pandas as pd

from backtester.data_loader import load_csv
from backtester.simulator import (
    SimulationResult,
    TradeConfig,
    _simulate_trade,
    simulate_money_line,
)
from backtester.indicators.money_line import (
    MoneyLineInputs,
    MoneyLineSignal,
    money_line_signals,
)
from backtester.indicators.money_line_v8 import money_line_v8_signals, MoneyLineV8Inputs

# Load data
print("Loading SOLUSDT 5m data (Aug 1 - Nov 28, 2025)...")
data_slice = load_csv(Path("data/solusdt_5m.csv"), "SOLUSDT", "5m")
df = data_slice.data
print(f"Loaded {len(df)} candles\n")

# --- Run v9 baseline -------------------------------------------------------
print("=" * 60)
print("v9 'MA Gap' - Baseline (Default Parameters)")
print("=" * 60)
v9_inputs = MoneyLineInputs(
    flip_threshold_percent=0.6,
    cooldown_bars=2,
    ma_gap_threshold=0.35,
    momentum_min_adx=23.0,
    momentum_long_max_pos=70.0,
    momentum_short_min_pos=25.0,
)
v9_result = simulate_money_line(df, "SOLUSDT", v9_inputs)
v9_trades = v9_result.trades
print(f"Generated {len(v9_trades)} trades")

# v9 aggregate stats. NOTE: a zero-PnL trade counts as a loss here (<=).
wins_v9 = sum(1 for t in v9_trades if t.realized_pnl > 0)
losses_v9 = sum(1 for t in v9_trades if t.realized_pnl <= 0)
gross_wins_v9 = sum(t.realized_pnl for t in v9_trades if t.realized_pnl > 0)
gross_losses_v9 = abs(sum(t.realized_pnl for t in v9_trades if t.realized_pnl <= 0))
pf_v9 = gross_wins_v9 / gross_losses_v9 if gross_losses_v9 > 0 else 0.0
wr_v9 = (wins_v9 / len(v9_trades) * 100) if v9_trades else 0.0
avg_win_v9 = (gross_wins_v9 / wins_v9) if wins_v9 > 0 else 0.0
avg_loss_v9 = (gross_losses_v9 / losses_v9) if losses_v9 > 0 else 0.0

print(f"\nResults:")
print(f" Total P&L: ${v9_result.total_pnl:.2f}")
print(f" Total Trades: {len(v9_trades)}")
print(f" Win Rate: {wr_v9:.2f}%")
print(f" Profit Factor: {pf_v9:.3f}")
print(f" Max Drawdown: ${v9_result.max_drawdown:.2f}")
print(f" Avg Win: ${avg_win_v9:.2f}")
print(f" Avg Loss: ${avg_loss_v9:.2f}")

# --- Run v8 baseline -------------------------------------------------------
print("\n" + "=" * 60)
print("v8 'Sticky Trend' - Baseline (Default Parameters)")
print("=" * 60)

# v8 has its own signal generator (it doesn't use MoneyLineInputs), so we
# drive the shared per-trade simulator manually with a default TradeConfig.
v8_signals = money_line_v8_signals(df, MoneyLineV8Inputs())
print(f"Generated {len(v8_signals)} signals")

# Manually simulate v8 trades using same logic as v9.
v8_trades = []
config = TradeConfig()
index_positions = {ts: idx for idx, ts in enumerate(df.index)}
next_available = 0  # first bar index at which a new trade may open

for sig in v8_signals:
    if sig.timestamp not in index_positions:
        continue
    idx = index_positions[sig.timestamp]
    if idx < next_available:
        # Still inside the previous trade - skip overlapping signals.
        continue
    # Convert v8 signal to v9 signal format for simulator.
    # (MoneyLineSignal import hoisted to the top of the file - it used to be
    # re-imported on every loop iteration.)
    ml_sig = MoneyLineSignal(
        timestamp=sig.timestamp,
        direction=sig.direction,
        entry_price=sig.entry_price,
        adx=sig.adx,
        atr=sig.atr,
        rsi=sig.rsi,
        volume_ratio=sig.volume_ratio,
        price_position=sig.price_position,
        signal_type="primary"
    )
    trade = _simulate_trade(df, idx, ml_sig, "SOLUSDT", config)
    if trade:
        v8_trades.append(trade)
        next_available = trade._exit_index

# Equity-curve drawdown: most negative excursion below the running peak.
v8_total_pnl = sum(t.realized_pnl for t in v8_trades)
v8_max_dd = 0.0
equity = 0.0
peak = 0.0
for t in v8_trades:
    equity += t.realized_pnl
    peak = max(peak, equity)
    v8_max_dd = min(v8_max_dd, equity - peak)

# v8 aggregate stats (same conventions as v9 above).
wins_v8 = sum(1 for t in v8_trades if t.realized_pnl > 0)
losses_v8 = sum(1 for t in v8_trades if t.realized_pnl <= 0)
gross_wins_v8 = sum(t.realized_pnl for t in v8_trades if t.realized_pnl > 0)
gross_losses_v8 = abs(sum(t.realized_pnl for t in v8_trades if t.realized_pnl <= 0))
pf_v8 = gross_wins_v8 / gross_losses_v8 if gross_losses_v8 > 0 else 0.0
wr_v8 = (wins_v8 / len(v8_trades) * 100) if v8_trades else 0.0
avg_win_v8 = (gross_wins_v8 / wins_v8) if wins_v8 > 0 else 0.0
avg_loss_v8 = (gross_losses_v8 / losses_v8) if losses_v8 > 0 else 0.0

print(f"\nResults:")
print(f" Total P&L: ${v8_total_pnl:.2f}")
print(f" Total Trades: {len(v8_trades)}")
print(f" Win Rate: {wr_v8:.2f}%")
print(f" Profit Factor: {pf_v8:.3f}")
print(f" Max Drawdown: ${v8_max_dd:.2f}")
print(f" Avg Win: ${avg_win_v8:.2f}")
print(f" Avg Loss: ${avg_loss_v8:.2f}")

# --- Comparison ------------------------------------------------------------
print("\n" + "=" * 60)
print("HEAD-TO-HEAD COMPARISON")
print("=" * 60)
pnl_diff = v9_result.total_pnl - v8_total_pnl
pnl_diff_pct = (pnl_diff / abs(v8_total_pnl)) * 100 if v8_total_pnl != 0 else 0
trade_diff = len(v9_trades) - len(v8_trades)
wr_diff = wr_v9 - wr_v8

print(f"P&L: v9 ${v9_result.total_pnl:.2f} vs v8 ${v8_total_pnl:.2f} (Δ ${pnl_diff:+.2f}, {pnl_diff_pct:+.1f}%)")
print(f"Trades: v9 {len(v9_trades)} vs v8 {len(v8_trades)} (Δ {trade_diff:+d})")
print(f"Win Rate: v9 {wr_v9:.2f}% vs v8 {wr_v8:.2f}% (Δ {wr_diff:+.2f}%)")
print(f"Profit Factor: v9 {pf_v9:.3f} vs v8 {pf_v8:.3f}")

print("\n" + "=" * 60)
if pnl_diff > 0:
    print(f"WINNER: v9 'MA Gap' by ${pnl_diff:.2f} ({pnl_diff_pct:.1f}%)")
    print("v9's faster entries + MA gap context outperform v8's conservative approach")
else:
    print(f"WINNER: v8 'Sticky Trend' by ${-pnl_diff:.2f} ({-pnl_diff_pct:.1f}%)")
    print("v8's conservative confirmation bars outperform v9's speed")
print("=" * 60)
||||
68
scripts/debug_ma_gap.py
Normal file
68
scripts/debug_ma_gap.py
Normal file
@@ -0,0 +1,68 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Debug MA gap scoring to understand parameter insensitivity."""
|
||||
|
||||
import sys
|
||||
from pathlib import Path
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
|
||||
# Add backtester to path
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent))
|
||||
|
||||
from backtester.data_loader import load_csv
|
||||
from backtester.indicators.money_line import MoneyLineInputs, ema
|
||||
|
||||
def analyze_ma_gap_distribution(csv_path: str) -> None:
    """Analyze MA gap score distribution to understand parameter behavior.

    Loads 5m candles from ``csv_path``, computes the EMA(50)/EMA(200) gap as
    a percentage of the close, prints its distribution, then shows how the
    tanh-squashed gap score responds to several threshold and flip settings.
    """

    print("=" * 80)
    print("MA GAP DISTRIBUTION ANALYSIS")
    print("=" * 80)

    # Load data
    data_slice = load_csv(Path(csv_path), "SOL-PERP", "5m")
    df = data_slice.data  # DataSlice.data is the DataFrame
    print(f"Loaded {len(df)} bars\n")

    # Calculate EMAs (fast = 50, slow = 200 periods)
    df["ema_fast"] = ema(df["close"], 50)
    df["ema_slow"] = ema(df["close"], 200)

    # Calculate raw MA gap as a percentage of the close price
    ma_gap = 100.0 * (df["ema_fast"] - df["ema_slow"]) / df["close"]

    print("RAW MA GAP (%) DISTRIBUTION:")
    print(f" Min: {ma_gap.min():.4f}%")
    print(f" 25th: {ma_gap.quantile(0.25):.4f}%")
    print(f" Median: {ma_gap.median():.4f}%")
    print(f" 75th: {ma_gap.quantile(0.75):.4f}%")
    print(f" Max: {ma_gap.max():.4f}%")
    print(f" Std: {ma_gap.std():.4f}%\n")

    # Test different thresholds: tanh squashes gap/threshold into [-1, 1],
    # so a smaller threshold saturates the score faster.
    for threshold in [0.2, 0.3, 0.35, 0.4, 0.5]:
        gap_score = np.tanh(ma_gap / threshold)

        print(f"\nTHRESHOLD = {threshold} (default: 0.35):")
        print(f" gap_score min: {gap_score.min():.6f}")
        print(f" gap_score max: {gap_score.max():.6f}")
        print(f" gap_score median: {gap_score.median():.6f}")

        # Check flip_threshold comparisons
        for flip_pct in [0.4, 0.5, 0.6, 0.7]:
            flip_threshold = flip_pct / 100.0  # Convert to decimal
            # NOTE(review): this compares the tanh score (range [-1, 1])
            # against ~0.004-0.007, so nearly every bar will count as a
            # potential signal - confirm this matches how
            # flip_threshold_percent is actually applied in the indicator.

            # Count how many bars exceed threshold in either direction
            long_signals = (gap_score > flip_threshold).sum()
            short_signals = (gap_score < -flip_threshold).sum()
            total_signals = long_signals + short_signals

            print(f" flip_threshold={flip_pct}% → {total_signals} potential signals")
            print(f" (LONG: {long_signals}, SHORT: {short_signals})")
|
||||
|
||||
if __name__ == "__main__":
    # Exactly one positional argument (the CSV path) is required.
    cli_args = sys.argv[1:]
    if len(cli_args) != 1:
        print("Usage: python debug_ma_gap.py <csv_path>")
        sys.exit(1)
    analyze_ma_gap_distribution(cli_args[0])
|
||||
212
scripts/diagnostic_sweep.py
Executable file
212
scripts/diagnostic_sweep.py
Executable file
@@ -0,0 +1,212 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Diagnostic Parameter Sweep - Verify Parameters Actually Control Behavior
|
||||
Purpose: Determine if parameter insensitivity is a bug or reality
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Tuple
|
||||
|
||||
PROJECT_ROOT = Path(__file__).resolve().parents[1]
|
||||
if str(PROJECT_ROOT) not in sys.path:
|
||||
sys.path.append(str(PROJECT_ROOT))
|
||||
|
||||
import pandas as pd
|
||||
from pathlib import Path
|
||||
from tqdm import tqdm
|
||||
from backtester.data_loader import load_csv
|
||||
from backtester.indicators.money_line import MoneyLineInputs, money_line_signals
|
||||
from backtester.simulator import TradeConfig, simulate_money_line
|
||||
|
||||
|
||||
def test_parameter_impact(csv_path: Path, symbol: str) -> Dict[str, List[Tuple[float, int, float]]]:
    """
    Test if each parameter actually changes behavior.

    Varies one MoneyLineInputs field at a time (all other fields keep their
    defaults) and records how trade count and PnL respond.

    Returns: {param_name: [(param_value, num_signals, total_pnl), ...]}
    """
    print("Loading data...")
    data_slice = load_csv(Path(csv_path), symbol, "5m")
    df = data_slice.data
    print(f"Loaded {len(df)} bars")

    # Same trade sizing for every run so results stay comparable.
    # (Removed an unused `baseline = MoneyLineInputs()` local - each test
    # constructs its own inputs with a single overridden field.)
    config = TradeConfig(position_size=8100.0, max_bars_per_trade=288)

    results = {}

    # Test 1: flip_threshold_percent (should dramatically affect signal count)
    print("\n" + "="*60)
    print("TEST 1: flip_threshold_percent (0.4, 0.5, 0.6, 0.7)")
    print("Expected: LOWER threshold = MORE signals")
    print("="*60)
    flip_results = []
    for val in tqdm([0.4, 0.5, 0.6, 0.7], desc="Testing flip_threshold"):
        inputs = MoneyLineInputs(flip_threshold_percent=val)
        result = simulate_money_line(df, symbol, inputs, config)
        flip_results.append((val, len(result.trades), result.total_pnl))
        print(f"flip_threshold={val:.1f}: {len(result.trades)} trades, ${result.total_pnl:.2f} PnL")
    results['flip_threshold_percent'] = flip_results

    # Test 2: cooldown_bars (should affect signal frequency)
    print("\n" + "="*60)
    print("TEST 2: cooldown_bars (1, 2, 3, 4)")
    print("Expected: LOWER cooldown = MORE signals")
    print("="*60)
    cooldown_results = []
    for val in tqdm([1, 2, 3, 4], desc="Testing cooldown_bars"):
        inputs = MoneyLineInputs(cooldown_bars=val)
        result = simulate_money_line(df, symbol, inputs, config)
        cooldown_results.append((val, len(result.trades), result.total_pnl))
        print(f"cooldown_bars={val}: {len(result.trades)} trades, ${result.total_pnl:.2f} PnL")
    results['cooldown_bars'] = cooldown_results

    # Test 3: momentum_min_adx (should filter signals)
    print("\n" + "="*60)
    print("TEST 3: momentum_min_adx (18, 21, 24, 27)")
    print("Expected: HIGHER ADX = FEWER signals")
    print("="*60)
    adx_results = []
    for val in tqdm([18.0, 21.0, 24.0, 27.0], desc="Testing momentum_min_adx"):
        inputs = MoneyLineInputs(momentum_min_adx=val)
        result = simulate_money_line(df, symbol, inputs, config)
        adx_results.append((val, len(result.trades), result.total_pnl))
        print(f"momentum_min_adx={val:.1f}: {len(result.trades)} trades, ${result.total_pnl:.2f} PnL")
    results['momentum_min_adx'] = adx_results

    # Test 4: ma_gap_threshold (should affect signal generation)
    print("\n" + "="*60)
    print("TEST 4: ma_gap_threshold (0.2, 0.3, 0.4, 0.5)")
    print("Expected: Different signal counts")
    print("="*60)
    gap_results = []
    for val in tqdm([0.2, 0.3, 0.4, 0.5], desc="Testing ma_gap_threshold"):
        inputs = MoneyLineInputs(ma_gap_threshold=val)
        result = simulate_money_line(df, symbol, inputs, config)
        gap_results.append((val, len(result.trades), result.total_pnl))
        print(f"ma_gap_threshold={val:.1f}: {len(result.trades)} trades, ${result.total_pnl:.2f} PnL")
    results['ma_gap_threshold'] = gap_results

    return results
|
||||
|
||||
|
||||
def analyze_results(results: Dict[str, List[Tuple[float, int, float]]]):
    """Report, per parameter, whether varying it changed trades or PnL at all.

    `results` maps a parameter name to [(value, num_trades, total_pnl), ...].
    """
    rule = "=" * 80
    print("\n" + rule)
    print("DIAGNOSTIC ANALYSIS")
    print(rule)

    for param_name, rows in results.items():
        signal_counts = [num_trades for _, num_trades, _ in rows]
        pnls = [pnl for _, _, pnl in rows]

        # If every tested value yields the same trade count AND PnL, the
        # parameter is effectively a no-op.
        if len(set(signal_counts)) == 1 and len(set(pnls)) == 1:
            print(f"\n🔴 {param_name}: NO EFFECT - All configs produce identical results!")
            print(f" All {signal_counts[0]} trades, ${pnls[0]:.2f} PnL")
            print(f" ⚠️ Parameter is NOT being applied or is overridden")
        else:
            lo_sig, hi_sig = min(signal_counts), max(signal_counts)
            lo_pnl, hi_pnl = min(pnls), max(pnls)
            range_pct = ((hi_sig - lo_sig) / lo_sig * 100) if lo_sig > 0 else 0

            print(f"\n✅ {param_name}: HAS EFFECT")
            print(f" Signal range: {lo_sig}-{hi_sig} ({range_pct:.1f}% variation)")
            print(f" PnL range: ${lo_pnl:.2f} to ${hi_pnl:.2f}")
|
||||
|
||||
|
||||
def test_extreme_configs(csv_path: Path, symbol: str):
    """Test extreme parameter combinations to verify they produce different results.

    Runs ultra-loose, baseline, and ultra-strict configs on the same data and
    returns True when they produce different trade counts (parameter system
    works), False when all three are identical (parameter system broken).
    """
    print("\n" + "="*80)
    print("EXTREME CONFIGURATION TEST")
    print("="*80)

    # BUGFIX: load_csv takes (path, symbol, timeframe) and returns a
    # DataSlice; the simulator needs the underlying DataFrame. The old
    # `df = load_csv(csv_path)` call didn't match the signature used
    # everywhere else in this script (see test_parameter_impact).
    data_slice = load_csv(Path(csv_path), symbol, "5m")
    df = data_slice.data
    config = TradeConfig(position_size=8100.0, max_bars_per_trade=288)

    # Ultra-loose config (should generate MANY signals)
    loose = MoneyLineInputs(
        flip_threshold_percent=0.3,
        cooldown_bars=1,
        momentum_min_adx=15.0,
        ma_gap_threshold=0.1,
        momentum_spacing=2,
        momentum_cooldown=1
    )

    # Ultra-strict config (should generate FEW signals)
    strict = MoneyLineInputs(
        flip_threshold_percent=0.8,
        cooldown_bars=5,
        momentum_min_adx=30.0,
        ma_gap_threshold=0.6,
        momentum_spacing=6,
        momentum_cooldown=5
    )

    # Baseline (all defaults)
    baseline = MoneyLineInputs()

    print("\n🔹 ULTRA-LOOSE config:")
    loose_result = simulate_money_line(df, symbol, loose, config)
    print(f" Trades: {len(loose_result.trades)}, PnL: ${loose_result.total_pnl:.2f}")

    print("\n🔹 BASELINE config:")
    baseline_result = simulate_money_line(df, symbol, baseline, config)
    print(f" Trades: {len(baseline_result.trades)}, PnL: ${baseline_result.total_pnl:.2f}")

    print("\n🔹 ULTRA-STRICT config:")
    strict_result = simulate_money_line(df, symbol, strict, config)
    print(f" Trades: {len(strict_result.trades)}, PnL: ${strict_result.total_pnl:.2f}")

    if len(loose_result.trades) == len(baseline_result.trades) == len(strict_result.trades):
        print("\n🔴 CRITICAL BUG: All three configs produce IDENTICAL trade counts!")
        print(" Parameter system is completely broken.")
        return False
    else:
        print("\n✅ Configs produce different results - parameter system works")
        print(f" Variation: {len(strict_result.trades)} to {len(loose_result.trades)} trades")
        return True
|
||||
|
||||
|
||||
def main() -> None:
    """CLI entry point: run the parameter-impact and extreme-config diagnostics."""
    parser = argparse.ArgumentParser(description="Diagnostic parameter testing")
    parser.add_argument("--csv", type=Path, required=True, help="Path to OHLCV CSV")
    parser.add_argument("--symbol", default="SOL-PERP", help="Symbol name")
    args = parser.parse_args()

    print("="*80)
    print("V9 PARAMETER DIAGNOSTIC SUITE")
    print("="*80)
    print(f"Data: {args.csv}")
    print(f"Symbol: {args.symbol}")

    # Test 1: Individual parameter impact (one field varied at a time)
    results = test_parameter_impact(args.csv, args.symbol)
    analyze_results(results)

    # Test 2: Extreme configs (loose vs baseline vs strict must differ)
    system_works = test_extreme_configs(args.csv, args.symbol)

    print("\n" + "="*80)
    print("CONCLUSION")
    print("="*80)
    if system_works:
        print("✅ Parameter system is functional")
        print(" The parameter insensitivity in the sweep may indicate:")
        print(" 1. The sweep grid didn't explore extreme enough values")
        print(" 2. Parameters have non-linear interactions")
        print(" 3. Core edge comes from EMA logic, not parameter tuning")
    else:
        print("🔴 Parameter system is BROKEN - parameters don't affect behavior")
        print(" This explains why the sweep found 100+ identical results")
        print(" Fix required before optimization can proceed")

    print("\n" + "="*80)


if __name__ == "__main__":
    main()
|
||||
@@ -219,9 +219,20 @@ class ProgressBar:
|
||||
elapsed = time.time() - self.start_time
|
||||
rate = elapsed / count if count else 0
|
||||
remaining = rate * (self.total - count) if rate else 0
|
||||
|
||||
# Format elapsed time
|
||||
elapsed_hours = int(elapsed // 3600)
|
||||
elapsed_mins = int((elapsed % 3600) // 60)
|
||||
elapsed_str = f"{elapsed_hours}h {elapsed_mins}m" if elapsed_hours > 0 else f"{elapsed_mins}m"
|
||||
|
||||
# Format remaining time
|
||||
remaining_hours = int(remaining // 3600)
|
||||
remaining_mins = int((remaining % 3600) // 60)
|
||||
remaining_str = f"{remaining_hours}h {remaining_mins}m" if remaining_hours > 0 else f"{remaining_mins}m"
|
||||
|
||||
sys.stdout.write(
|
||||
f"\r[{bar}] {percent*100:5.1f}% ({count}/{self.total}) | "
|
||||
f"Elapsed {elapsed:6.1f}s | ETA {remaining:6.1f}s"
|
||||
f"Elapsed {elapsed_str:>7} | ETA {remaining_str:>7}"
|
||||
)
|
||||
sys.stdout.flush()
|
||||
if count >= self.total:
|
||||
|
||||
256
scripts/run_backtest_sweep_rsi.py
Executable file
256
scripts/run_backtest_sweep_rsi.py
Executable file
@@ -0,0 +1,256 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Parameter sweep for v9 Money Line with RSI Divergence filter.
|
||||
|
||||
Tests same parameter grid as vanilla v9 but adds RSI divergence filtering
|
||||
to all trades. Compares if divergence improves results across all parameter
|
||||
combinations or just the baseline.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import pandas as pd
|
||||
import numpy as np
|
||||
from multiprocessing import Pool, cpu_count
|
||||
from datetime import datetime
|
||||
import argparse
|
||||
|
||||
# Add project root to path
|
||||
sys.path.insert(0, '/home/icke/traderv4')
|
||||
|
||||
from backtester.data_loader import load_csv, DataSlice
|
||||
from backtester.simulator import simulate_money_line, TradeConfig
|
||||
from backtester.indicators.money_line import MoneyLineInputs
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def calculate_rsi(series, period=14):
    """Calculate RSI indicator.

    Simple-moving-average variant: averages gains and losses over `period`
    bars, so the first `period` values of the result are NaN.
    """
    change = series.diff()
    # Split each move into its up and down component (the initial NaN diff
    # becomes 0 via the `where` fallback).
    up_moves = change.where(change > 0, 0)
    down_moves = change.where(change < 0, 0) * -1
    avg_gain = up_moves.rolling(window=period).mean()
    avg_loss = down_moves.rolling(window=period).mean()
    relative_strength = avg_gain / avg_loss
    return 100 - (100 / (1 + relative_strength))
|
||||
|
||||
|
||||
def detect_rsi_divergence(df, trade, lookback=20):
    """
    Detect RSI divergence for a trade.

    Returns True if divergence detected, False otherwise.

    Bullish divergence (LONG): Price makes lower low, RSI makes higher low
    Bearish divergence (SHORT): Price makes higher high, RSI makes lower high
    """
    entry_idx = df.index.get_loc(trade.entry_time)

    # Not enough history to judge - keep the trade rather than drop it.
    if entry_idx < lookback:
        return True

    window = df.iloc[entry_idx - lookback:entry_idx + 1]

    if trade.direction == 'long':
        # Bullish: the price low is more recent than the RSI low.
        price_loc = window.index.get_loc(window['close'].idxmin())
        rsi_loc = window.index.get_loc(window['rsi'].idxmin())
        return price_loc > rsi_loc

    if trade.direction == 'short':
        # Bearish: the price high is more recent than the RSI high.
        price_loc = window.index.get_loc(window['close'].idxmax())
        rsi_loc = window.index.get_loc(window['rsi'].idxmax())
        return price_loc > rsi_loc

    # Unknown direction: never passes the filter.
    return False
|
||||
|
||||
|
||||
def test_params(args):
    """Test a single parameter combination with RSI divergence filter.

    args: a (params, df) tuple, where params is a dict of grid values and df
    is the candle DataFrame (must already contain an 'rsi' column).

    Returns a result dict with the params plus aggregate stats, or None when
    no trades survive the divergence filter or the simulation raises.
    """
    params, df = args

    try:
        # Create MoneyLineInputs with correct field names
        inputs = MoneyLineInputs(
            flip_threshold_percent=params['flip_threshold'],
            ma_gap_threshold=params['ma_gap'],
            momentum_min_adx=params['momentum_adx'],
            momentum_long_max_pos=params['momentum_long_pos'],
            momentum_short_min_pos=params['momentum_short_pos'],
            cooldown_bars=params['cooldown_bars'],
            momentum_spacing=params['momentum_spacing'],
            momentum_cooldown=params['momentum_cooldown']
        )

        # Run simulation
        trade_config = TradeConfig(position_size=1000.0, max_bars_per_trade=2880)
        result = simulate_money_line(df, 'SOLUSDT', inputs=inputs, config=trade_config)
        all_trades = result.trades

        # Filter for RSI divergence (keep only trades with divergence)
        trades = [t for t in all_trades if detect_rsi_divergence(df, t)]

        if not trades:
            return None

        # Aggregate stats. NOTE(review): a zero-PnL trade counts as neither
        # win nor loss here (strict <), unlike sibling scripts that use <=
        # for losses - confirm this asymmetry is intended.
        total_pnl = sum(t.realized_pnl for t in trades)
        wins = sum(1 for t in trades if t.realized_pnl > 0)
        losses = sum(1 for t in trades if t.realized_pnl < 0)
        win_rate = (wins / len(trades) * 100) if trades else 0

        avg_win = np.mean([t.realized_pnl for t in trades if t.realized_pnl > 0]) if wins > 0 else 0
        avg_loss = np.mean([t.realized_pnl for t in trades if t.realized_pnl < 0]) if losses > 0 else 0
        # avg_win*wins / (avg_loss*losses) is gross wins over gross losses.
        profit_factor = abs(avg_win * wins / (avg_loss * losses)) if (avg_loss != 0 and losses > 0) else 0

        # Peak-to-trough drawdown on the cumulative PnL curve.
        max_dd = 0
        peak = 0
        cumulative = 0
        for trade in trades:
            cumulative += trade.realized_pnl
            if cumulative > peak:
                peak = cumulative
            dd = peak - cumulative
            if dd > max_dd:
                max_dd = dd

        return {
            'flip_threshold': params['flip_threshold'],
            'ma_gap': params['ma_gap'],
            'momentum_adx': params['momentum_adx'],
            'momentum_long_pos': params['momentum_long_pos'],
            'momentum_short_pos': params['momentum_short_pos'],
            'cooldown_bars': params['cooldown_bars'],
            'momentum_spacing': params['momentum_spacing'],
            'momentum_cooldown': params['momentum_cooldown'],
            'total_pnl': total_pnl,
            'num_trades': len(trades),
            'win_rate': win_rate,
            'profit_factor': profit_factor,
            'max_drawdown': max_dd,
            'avg_win': avg_win,
            'avg_loss': avg_loss
        }

    except Exception as e:
        # Best-effort worker: report the failure and let the sweep continue.
        print(f"Error testing params {params}: {e}")
        return None
|
||||
|
||||
|
||||
def main() -> None:
    """Run the full v9 parameter sweep with the RSI-divergence filter.

    Loads candles, computes RSI once (shared by all workers), tests every
    grid combination in parallel, writes ranked results to
    sweep_v9_rsi_divergence.csv, and prints the top 10.
    """
    parser = argparse.ArgumentParser(description='Run v9 parameter sweep with RSI divergence filter')
    parser.add_argument('--workers', type=int, default=None, help='Number of worker processes')
    parser.add_argument('--top', type=int, default=None, help='Only save top N results')
    args = parser.parse_args()

    start_time = datetime.now().timestamp()
    # Leave two cores free by default so the host stays responsive.
    workers = args.workers if args.workers else max(1, cpu_count() - 2)

    print(f"v9 + RSI DIVERGENCE Parameter Sweep")
    print(f"Workers: {workers}")
    print(f"Started: {datetime.now()}")
    print()

    # Load data
    print("Loading data...")
    data_slice = load_csv(Path('data/solusdt_5m.csv'), 'SOLUSDT', '5m')
    df = data_slice.data
    print(f"Loaded {len(df)} candles")

    # Calculate RSI for divergence detection (once, before forking workers)
    print("Calculating RSI...")
    df['rsi'] = calculate_rsi(df['close'], 14)
    print()

    # Parameter grid (same as vanilla v9)
    param_grid = {
        'flip_threshold': [0.4, 0.5, 0.6, 0.7],
        'ma_gap': [0.20, 0.30, 0.40, 0.50],
        'momentum_adx': [18, 21, 24, 27],
        'momentum_long_pos': [60, 65, 70, 75],
        'momentum_short_pos': [20, 25, 30, 35],
        'cooldown_bars': [1, 2, 3, 4],
        'momentum_spacing': [2, 3, 4, 5],
        'momentum_cooldown': [1, 2, 3, 4]
    }

    # Generate all combinations (cartesian product of the grid)
    from itertools import product
    keys = param_grid.keys()
    values = param_grid.values()
    combinations = [dict(zip(keys, v)) for v in product(*values)]

    total_combos = len(combinations)
    print(f"Testing {total_combos:,} parameter combinations with RSI divergence filter")
    print(f"Parameter grid: {param_grid}")
    print()

    # Prepare arguments (each worker call gets its params plus the DataFrame)
    test_args = [(params, df) for params in combinations]

    # Run parallel tests with progress
    results = []
    completed = 0

    with Pool(workers) as pool:
        for result in pool.imap_unordered(test_params, test_args):
            if result is not None:
                results.append(result)

            completed += 1
            if completed % 100 == 0:
                # elapsed is in minutes, so rate is combos/minute.
                elapsed = (datetime.now().timestamp() - start_time) / 60
                rate = completed / elapsed if elapsed > 0 else 0
                remaining = (total_combos - completed) / rate if rate > 0 else 0

                print(f"Progress: {completed}/{total_combos} ({(completed/total_combos*100):.1f}%) | "
                      f"Elapsed: {elapsed:.1f}m | Remaining: {remaining:.1f}m | Rate: {rate:.1f}/min")

    print()
    print(f"Completed {len(results)} valid tests")

    if not results:
        print("No valid results!")
        return

    # Sort by total PnL (best first)
    results.sort(key=lambda x: x['total_pnl'], reverse=True)

    # Save results
    results_df = pd.DataFrame(results)

    if args.top:
        results_df = results_df.head(args.top)
        print(f"Saving top {args.top} results...")

    output_file = 'sweep_v9_rsi_divergence.csv'
    results_df.to_csv(output_file, index=False)
    print(f"Results saved to {output_file}")
    print()

    # Show top 10
    print("=" * 80)
    print("TOP 10 RESULTS (v9 + RSI Divergence)")
    print("=" * 80)
    for i, result in enumerate(results_df.head(10).to_dict('records'), 1):
        print(f"\n{i}. P&L: ${result['total_pnl']:.2f} | Trades: {result['num_trades']} | WR: {result['win_rate']:.1f}% | PF: {result['profit_factor']:.3f}")
        print(f" flip={result['flip_threshold']:.1f}, ma_gap={result['ma_gap']:.2f}, "
              f"adx={result['momentum_adx']}, long_pos={result['momentum_long_pos']}, "
              f"short_pos={result['momentum_short_pos']}")
        print(f" cooldown={result['cooldown_bars']}, spacing={result['momentum_spacing']}, "
              f"mom_cd={result['momentum_cooldown']}")
        print(f" Max DD: ${result['max_drawdown']:.2f} | Avg Win: ${result['avg_win']:.2f} | Avg Loss: ${result['avg_loss']:.2f}")

    print()
    print(f"Finished: {datetime.now()}")
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Removed a dead `start_time = datetime.now().timestamp()` assignment:
    # main() computes its own local start_time and nothing reads the global.
    main()
|
||||
216
scripts/sensitivity_analysis.py
Executable file
216
scripts/sensitivity_analysis.py
Executable file
@@ -0,0 +1,216 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Sensitivity Analysis - Single Parameter Impact Testing
|
||||
Purpose: Measure exact impact of each parameter in isolation
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Tuple
|
||||
|
||||
PROJECT_ROOT = Path(__file__).resolve().parents[1]
|
||||
if str(PROJECT_ROOT) not in sys.path:
|
||||
sys.path.append(str(PROJECT_ROOT))
|
||||
|
||||
import pandas as pd
|
||||
from pathlib import Path
|
||||
from tqdm import tqdm
|
||||
from backtester.data_loader import load_csv
|
||||
from backtester.indicators.money_line import MoneyLineInputs
|
||||
from backtester.simulator import TradeConfig, simulate_money_line
|
||||
|
||||
|
||||
def test_single_parameter_sensitivity(
    csv_path: Path,
    symbol: str,
    param_name: str,
    values: List[float]
) -> List[Tuple[float, int, float, float, float]]:
    """
    Vary one parameter while holding others at baseline.

    Every other MoneyLineInputs field keeps its default, so each result tuple
    isolates the impact of `param_name` alone.
    (Removed an unused `baseline = MoneyLineInputs()` local.)

    Returns: [(value, trades, pnl, win_rate, profit_factor), ...]
    """
    data_slice = load_csv(Path(csv_path), symbol, "5m")
    df = data_slice.data
    config = TradeConfig(position_size=8100.0, max_bars_per_trade=288)

    results = []

    print(f"\n{'='*80}")
    print(f"TESTING: {param_name}")
    print(f"Values: {values}")
    print(f"{'='*80}")

    for val in tqdm(values, desc=f"Testing {param_name}", leave=False):
        # Create inputs with single parameter changed
        kwargs = {param_name: val}
        inputs = MoneyLineInputs(**kwargs)

        result = simulate_money_line(df, symbol, inputs, config)

        # Cap an infinite profit factor (no losing trades) at 999.0 so
        # downstream max()/formatting stays finite.
        results.append((
            val,
            len(result.trades),
            result.total_pnl,
            result.win_rate,
            result.profit_factor if result.profit_factor != float('inf') else 999.0
        ))

        print(f"{param_name}={val:6.2f}: "
              f"{len(result.trades):3d} trades | "
              f"${result.total_pnl:8.2f} PnL | "
              f"{result.win_rate:5.1f}% WR | "
              f"PF={result.profit_factor:.3f}")

    return results
|
||||
|
||||
|
||||
def analyze_sensitivity(
    param_name: str,
    results: List[Tuple[float, int, float, float, float]]
):
    """Summarize a single-parameter sweep and classify its sensitivity.

    Args:
        param_name: Name of the parameter that was varied.
        results: (value, trades, pnl, win_rate, profit_factor) tuples, as
            produced by test_single_parameter_sensitivity.
    """
    # Guard: max()/.index() below would raise on an empty sweep.
    if not results:
        print(f"\nNo results for {param_name} - skipping analysis")
        return

    values = [r[0] for r in results]
    trades = [r[1] for r in results]
    pnls = [r[2] for r in results]
    win_rates = [r[3] for r in results]
    profit_factors = [r[4] for r in results]

    # Index of the best run under each metric.
    best_pnl_idx = pnls.index(max(pnls))
    best_wr_idx = win_rates.index(max(win_rates))
    best_pf_idx = profit_factors.index(max(profit_factors))

    print(f"\n{'='*80}")
    print(f"SENSITIVITY ANALYSIS: {param_name}")
    print(f"{'='*80}")

    # Trade count variation (only meaningful if any run produced trades).
    min_trades, max_trades = min(trades), max(trades)
    if max_trades > 0:
        trade_variation = (max_trades - min_trades) / max_trades * 100
        print(f"Trade Count Range: {min_trades}-{max_trades} ({trade_variation:.1f}% variation)")

    # PnL spread across the sweep drives the sensitivity classification.
    min_pnl, max_pnl = min(pnls), max(pnls)
    pnl_range = max_pnl - min_pnl
    print(f"PnL Range: ${min_pnl:.2f} to ${max_pnl:.2f} (${pnl_range:.2f} swing)")

    # Best values
    print(f"\nBest PnL: {values[best_pnl_idx]:.2f} → ${pnls[best_pnl_idx]:.2f}")
    print(f"Best WR: {values[best_wr_idx]:.2f} → {win_rates[best_wr_idx]:.1f}%")
    print(f"Best PF: {values[best_pf_idx]:.2f} → {profit_factors[best_pf_idx]:.3f}")

    # Sensitivity classification by dollar swing.
    if pnl_range < 50:
        print(f"\n💡 {param_name}: LOW SENSITIVITY")
        print(f" PnL swing only ${pnl_range:.2f} - parameter has minimal impact")
    elif pnl_range < 200:
        print(f"\n💡 {param_name}: MODERATE SENSITIVITY")
        print(f" PnL swing ${pnl_range:.2f} - worth tuning but not critical")
    else:
        print(f"\n💡 {param_name}: HIGH SENSITIVITY")
        print(f" PnL swing ${pnl_range:.2f} - CRITICAL parameter to optimize")
|
||||
|
||||
|
||||
def run_full_sensitivity_suite(csv_path: Path, symbol: str):
    """Run the one-at-a-time sweep for every tunable parameter.

    Each parameter is varied across its candidate values while the others
    stay at baseline; parameters are then ranked by their PnL impact.
    """
    sweep_plan = [
        ("flip_threshold_percent", [0.3, 0.4, 0.5, 0.6, 0.7, 0.8]),
        ("ma_gap_threshold", [0.15, 0.25, 0.35, 0.45, 0.55, 0.65]),
        ("momentum_min_adx", [15.0, 18.0, 21.0, 24.0, 27.0, 30.0]),
        ("momentum_long_max_pos", [60.0, 65.0, 70.0, 75.0, 80.0]),
        ("momentum_short_min_pos", [15.0, 20.0, 25.0, 30.0, 35.0]),
        ("cooldown_bars", [1, 2, 3, 4, 5]),
        ("momentum_spacing", [2, 3, 4, 5, 6]),
        ("momentum_cooldown", [1, 2, 3, 4, 5]),
    ]

    collected = {}

    print(f"\n{'='*80}")
    print("RUNNING FULL SENSITIVITY SUITE")
    print(f"{'='*80}")
    print(f"Testing {len(sweep_plan)} parameters")
    print()

    for name, candidates in tqdm(sweep_plan, desc="Overall progress"):
        outcome = test_single_parameter_sensitivity(csv_path, symbol, name, candidates)
        collected[name] = outcome
        analyze_sensitivity(name, outcome)

    print(f"\n{'='*80}")
    print("PARAMETER SENSITIVITY RANKING")
    print(f"{'='*80}")

    # Rank parameters by how far PnL swings across their candidate values.
    rankings = sorted(
        (
            (name, max(r[2] for r in outcome) - min(r[2] for r in outcome))
            for name, outcome in collected.items()
        ),
        key=lambda pair: pair[1],
        reverse=True,
    )

    print("\nOptimization Priority (by PnL impact):")
    for rank, (name, swing) in enumerate(rankings, 1):
        if swing > 200:
            impact = "🔴 CRITICAL"
        elif swing > 100:
            impact = "🟡 HIGH"
        elif swing > 50:
            impact = "🟢 MODERATE"
        else:
            impact = "⚪ LOW"
        print(f"{rank}. {name:30s}: ${swing:8.2f} swing - {impact}")
|
||||
|
||||
|
||||
def main():
    """CLI entry point: run sensitivity analysis for one or all parameters."""
    parser = argparse.ArgumentParser(description="Single-parameter sensitivity analysis")
    parser.add_argument("--csv", type=Path, required=True, help="Path to OHLCV CSV")
    parser.add_argument("--symbol", default="SOL-PERP", help="Symbol name")
    parser.add_argument("--param", help="Test specific parameter only")
    args = parser.parse_args()

    print("="*80)
    print("V9 SENSITIVITY ANALYSIS")
    print("="*80)
    print(f"Data: {args.csv}")
    print(f"Symbol: {args.symbol}")

    if not args.param:
        # No parameter selected: sweep everything.
        run_full_sensitivity_suite(args.csv, args.symbol)
    else:
        # Candidate values per supported parameter.
        test_values = {
            "flip_threshold_percent": [0.3, 0.4, 0.5, 0.6, 0.7, 0.8],
            "ma_gap_threshold": [0.15, 0.25, 0.35, 0.45, 0.55, 0.65],
            "momentum_min_adx": [15.0, 18.0, 21.0, 24.0, 27.0, 30.0],
            "momentum_long_max_pos": [60.0, 65.0, 70.0, 75.0, 80.0],
            "momentum_short_min_pos": [15.0, 20.0, 25.0, 30.0, 35.0],
            "cooldown_bars": [1, 2, 3, 4, 5],
            "momentum_spacing": [2, 3, 4, 5, 6],
            "momentum_cooldown": [1, 2, 3, 4, 5],
        }

        if args.param not in test_values:
            print(f"❌ Unknown parameter: {args.param}")
            print(f"Available: {', '.join(test_values.keys())}")
            return

        single_run = test_single_parameter_sensitivity(
            args.csv, args.symbol, args.param, test_values[args.param]
        )
        analyze_sensitivity(args.param, single_run)

    print(f"\n{'='*80}")
|
||||
|
||||
|
||||
# Script entry point: run the sensitivity-analysis CLI when executed directly.
if __name__ == "__main__":
    main()
|
||||
218
scripts/trade_analysis.py
Executable file
218
scripts/trade_analysis.py
Executable file
@@ -0,0 +1,218 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Trade-Level Analysis - Find Patterns in Losing Trades
|
||||
Purpose: Identify what conditions lead to losses
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import List, Dict
|
||||
|
||||
PROJECT_ROOT = Path(__file__).resolve().parents[1]
|
||||
if str(PROJECT_ROOT) not in sys.path:
|
||||
sys.path.append(str(PROJECT_ROOT))
|
||||
|
||||
import pandas as pd
|
||||
from pathlib import Path
|
||||
from tqdm import tqdm
|
||||
from backtester.data_loader import load_csv
|
||||
from backtester.indicators.money_line import MoneyLineInputs, money_line_signals
|
||||
from backtester.simulator import TradeConfig, simulate_money_line, SimulatedTrade
|
||||
|
||||
|
||||
def analyze_trades(trades: List[SimulatedTrade], position_size: float = 8100.0) -> Dict:
    """Deep analysis of trade performance.

    Prints win/loss breakdowns, MAE/MFE excursion analysis, and
    per-direction performance, then returns headline summary metrics.

    Args:
        trades: Completed simulated trades.
        position_size: Dollar notional per trade; used to convert dollar
            PnL to percent when estimating profit given back by winners.
            Default 8100.0 matches the TradeConfig used elsewhere.

    Returns:
        Dict with total/winners/losers counts, win_rate (percent), and
        total_pnl, or {"error": ...} when no trades were supplied.
    """
    if not trades:
        return {"error": "No trades to analyze"}

    # Break-even trades (pnl == 0) fall into neither bucket by design.
    winners = [t for t in trades if t.pnl > 0]
    losers = [t for t in trades if t.pnl < 0]

    print("\n" + "="*80)
    print("TRADE PERFORMANCE BREAKDOWN")
    print("="*80)
    print(f"Total Trades: {len(trades)}")
    print(f"Winners: {len(winners)} ({len(winners)/len(trades)*100:.1f}%)")
    print(f"Losers: {len(losers)} ({len(losers)/len(trades)*100:.1f}%)")

    if winners:
        print(f"\nWinner Stats:")
        print(f" Avg PnL: ${sum(t.pnl for t in winners)/len(winners):.2f}")
        print(f" Max Win: ${max(t.pnl for t in winners):.2f}")
        print(f" Avg MFE: {sum(t.mfe for t in winners)/len(winners):.2f}%")
        print(f" Avg Bars: {sum(t.bars_held for t in winners)/len(winners):.1f}")

    if losers:
        print(f"\nLoser Stats:")
        print(f" Avg PnL: ${sum(t.pnl for t in losers)/len(losers):.2f}")
        print(f" Max Loss: ${min(t.pnl for t in losers):.2f}")
        print(f" Avg MAE: {sum(t.mae for t in losers)/len(losers):.2f}%")
        print(f" Avg Bars: {sum(t.bars_held for t in losers)/len(losers):.1f}")

    # Total P&L stats
    total_pnl = sum(t.pnl for t in trades)
    print(f"\nTotal P&L: ${total_pnl:.2f}")
    print(f"Avg Trade: ${total_pnl/len(trades):.2f}")

    # MAE/MFE Analysis
    print("\n" + "="*80)
    print("MAE/MFE ANALYSIS (Max Adverse/Favorable Excursion)")
    print("="*80)

    # Winners that peaked well above their realized profit gave some back.
    if winners:
        large_mfe_winners = [t for t in winners if t.mfe > 2.0]
        if large_mfe_winners:
            # MFE is in percent; convert realized dollar PnL to percent of notional.
            avg_giveback = sum(t.mfe - (t.pnl / position_size * 100) for t in large_mfe_winners) / len(large_mfe_winners)
            print(f"Winners with >2% MFE: {len(large_mfe_winners)}")
            print(f" Average profit given back: {avg_giveback:.2f}%")
            print(f" 💡 Consider: Wider trailing stop or earlier TP2 trigger")

    # Losers that were once in profit suggest exit-management improvements.
    if losers:
        positive_mfe_losers = [t for t in losers if t.mfe > 0.5]
        if positive_mfe_losers:
            avg_peak = sum(t.mfe for t in positive_mfe_losers) / len(positive_mfe_losers)
            print(f"\nLosers that reached >0.5% profit: {len(positive_mfe_losers)}")
            print(f" Average peak profit: {avg_peak:.2f}%")
            print(f" 💡 Consider: Tighter TP1 or better stop management")

    # Direction analysis
    print("\n" + "="*80)
    print("DIRECTION ANALYSIS")
    print("="*80)

    longs = [t for t in trades if t.direction == 'long']
    shorts = [t for t in trades if t.direction == 'short']

    if longs:
        long_wr = sum(1 for t in longs if t.pnl > 0) / len(longs) * 100
        long_pnl = sum(t.pnl for t in longs)
        print(f"LONGS: {len(longs)} trades, {long_wr:.1f}% WR, ${long_pnl:.2f} PnL")

    if shorts:
        short_wr = sum(1 for t in shorts if t.pnl > 0) / len(shorts) * 100
        short_pnl = sum(t.pnl for t in shorts)
        print(f"SHORTS: {len(shorts)} trades, {short_wr:.1f}% WR, ${short_pnl:.2f} PnL")

    # Flag a >10-point win-rate gap between directions.
    if longs and shorts:
        if long_wr > short_wr + 10:
            print(f" 💡 LONGs outperform: Consider quality threshold adjustment")
        elif short_wr > long_wr + 10:
            print(f" 💡 SHORTs outperform: Consider quality threshold adjustment")

    return {
        "total": len(trades),
        "winners": len(winners),
        "losers": len(losers),
        "win_rate": len(winners) / len(trades) * 100,
        "total_pnl": total_pnl
    }
|
||||
|
||||
|
||||
def find_exit_opportunities(trades: List[SimulatedTrade], position_size: float = 8100.0):
    """Analyze if different exit strategy could improve results.

    Prints the exit-type distribution and two what-if estimates: a tighter
    TP1 on losers that were once in profit, and runner (TP2) value.

    Args:
        trades: Completed simulated trades.
        position_size: Dollar notional per trade; used to value the
            hypothetical tighter TP1 exits. Default matches TradeConfig.
    """
    print("\n" + "="*80)
    print("EXIT STRATEGY ANALYSIS")
    print("="*80)

    # Guard: the percentage prints below divide by len(trades).
    if not trades:
        print("No trades to analyze")
        return

    # Current strategy exit-type distribution.
    tp1_hits = sum(1 for t in trades if t.exit_type == 'tp1')
    tp2_hits = sum(1 for t in trades if t.exit_type == 'tp2')
    sl_hits = sum(1 for t in trades if t.exit_type == 'sl')
    max_bars = sum(1 for t in trades if t.exit_type == 'max_bars')

    print(f"Current Exit Distribution:")
    print(f" TP1: {tp1_hits} ({tp1_hits/len(trades)*100:.1f}%)")
    print(f" TP2: {tp2_hits} ({tp2_hits/len(trades)*100:.1f}%)")
    print(f" SL: {sl_hits} ({sl_hits/len(trades)*100:.1f}%)")
    print(f" Max Bars: {max_bars} ({max_bars/len(trades)*100:.1f}%)")

    # Losers that once showed >=0.5% unrealized profit: a tighter TP1 at
    # 0.5% would have converted each into a small winner instead.
    could_take_tp1 = [t for t in trades if t.mfe >= 0.5 and t.pnl < 0]
    if could_take_tp1:
        saved_pnl = len(could_take_tp1) * 0.005 * position_size  # 0.5% profit instead of loss
        print(f"\n💡 Tighter TP1 (0.5%): Would save {len(could_take_tp1)} losing trades")
        print(f" Estimated improvement: +${saved_pnl:.2f}")

    # Check if runners are worth it
    tp2_trades = [t for t in trades if t.exit_type == 'tp2']
    if tp2_trades:
        tp2_avg = sum(t.pnl for t in tp2_trades) / len(tp2_trades)
        print(f"\nTP2/Runner Performance:")
        print(f" {len(tp2_trades)} trades reached TP2")
        print(f" Average TP2 profit: ${tp2_avg:.2f}")
        if tp2_avg < 100:
            print(f" 💡 Runners not adding much value - consider 100% TP1 close")
|
||||
|
||||
|
||||
def compare_to_baseline(csv_path: Path, symbol: str, best_inputs: MoneyLineInputs):
    """Compare the best sweep configuration against the default baseline.

    Runs both configurations over the same 5m dataset and prints headline
    trade-count / PnL / win-rate metrics plus the dollar improvement.

    Args:
        csv_path: Path to the 5m OHLCV CSV file.
        symbol: Market symbol (e.g. "SOL-PERP").
        best_inputs: Tuned MoneyLineInputs to compare against defaults.

    Returns:
        The list of trades produced by the best (tuned) configuration.
    """
    print("\n" + "="*80)
    print("BASELINE COMPARISON")
    print("="*80)

    print("\nLoading data and running simulations...")
    data_slice = load_csv(Path(csv_path), symbol, "5m")
    df = data_slice.data
    config = TradeConfig(position_size=8100.0, max_bars_per_trade=288)

    baseline = MoneyLineInputs()
    print("\nRunning baseline config...")
    baseline_result = simulate_money_line(df, symbol, baseline, config)
    print(f" Trades: {len(baseline_result.trades)}")
    print(f" PnL: ${baseline_result.total_pnl:.2f}")
    print(f" WR: {baseline_result.win_rate:.1f}%")

    print("\nRunning best sweep config...")
    best_result = simulate_money_line(df, symbol, best_inputs, config)
    print(f" Trades: {len(best_result.trades)}")
    print(f" PnL: ${best_result.total_pnl:.2f}")
    print(f" WR: {best_result.win_rate:.1f}%")

    improvement = best_result.total_pnl - baseline_result.total_pnl
    if baseline_result.total_pnl != 0:
        print(f"\nImprovement: ${improvement:.2f} ({improvement/baseline_result.total_pnl*100:.1f}%)")
    else:
        # Percentage improvement is undefined against a $0 baseline.
        print(f"\nImprovement: ${improvement:.2f} (baseline PnL was $0.00)")

    return best_result.trades
|
||||
|
||||
|
||||
def main():
    """CLI entry point: run the full trade-level analysis report."""
    parser = argparse.ArgumentParser(description="Trade-level analysis")
    parser.add_argument("--csv", type=Path, required=True, help="Path to OHLCV CSV")
    parser.add_argument("--symbol", default="SOL-PERP", help="Symbol name")
    args = parser.parse_args()

    print("="*80)
    print("V9 TRADE ANALYSIS")
    print("="*80)

    # "Best" configuration found by the parameter sweep.
    best_inputs = MoneyLineInputs(
        flip_threshold_percent=0.6,
        ma_gap_threshold=0.35,
        momentum_min_adx=23.0,
        momentum_long_max_pos=70.0,
        momentum_short_min_pos=25.0,
        cooldown_bars=2,
        momentum_spacing=3,
        momentum_cooldown=2,
    )

    best_trades = compare_to_baseline(args.csv, args.symbol, best_inputs)
    analyze_trades(best_trades)
    find_exit_opportunities(best_trades)

    print("\n" + "="*80)
    print("ACTIONABLE RECOMMENDATIONS")
    print("="*80)
    closing_lines = (
        "Based on this analysis, the next optimization steps should focus on:",
        "1. Exit strategy improvements (TP1/TP2 levels)",
        "2. Direction-specific quality thresholds",
        "3. Stop loss positioning",
        "4. Entry timing refinement",
    )
    for line in closing_lines:
        print(line)
|
||||
|
||||
|
||||
# Script entry point: run the trade-analysis CLI when executed directly.
if __name__ == "__main__":
    main()
|
||||
Reference in New Issue
Block a user