remove: V10 momentum system - backtest proved it adds no value
- Removed v10 TradingView indicator (moneyline_v10_momentum_dots.pinescript) - Removed v10 penalty system from signal-quality.ts (-30/-25 point penalties) - Removed backtest result files (sweep_*.csv) - Updated copilot-instructions.md to remove v10 references - Simplified direction-specific quality thresholds (LONG 90+, SHORT 80+) Rationale: - 1,944 parameter combinations tested in backtest - All top results IDENTICAL (568 trades, $498 P&L, 61.09% WR) - Momentum parameters had ZERO impact on trade selection - Profit factor 1.027 too low (barely profitable after fees) - Max drawdown -$1,270 vs +$498 profit = terrible risk-reward - v10 penalties were blocking good trades (bug: applied to wrong positions) Keeping v9 as production system - simpler, proven, effective.
This commit is contained in:
161
scripts/export_binance_ohlcv.py
Normal file
161
scripts/export_binance_ohlcv.py
Normal file
@@ -0,0 +1,161 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Download historical OHLCV data from Binance and store it as CSV."""
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import sys
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Dict, List
|
||||
from urllib.parse import urlencode
|
||||
from urllib.request import Request, urlopen
|
||||
|
||||
import pandas as pd
|
||||
|
||||
# Public Binance spot REST endpoint for candlestick (kline) data.
BINANCE_REST_BASE = "https://api.binance.com/api/v3/klines"
# Binance caps a single klines request at 1000 rows.
MAX_LIMIT = 1000
# Interval → milliseconds mapping taken from Binance documentation
INTERVAL_MS: Dict[str, int] = {
    "1m": 60_000,
    "3m": 180_000,
    "5m": 300_000,
    "15m": 900_000,
    "30m": 1_800_000,
    "1h": 3_600_000,
    "2h": 7_200_000,
    "4h": 14_400_000,
    "6h": 21_600_000,
    "8h": 28_800_000,
    "12h": 43_200_000,
    "1d": 86_400_000,
}
|
||||
|
||||
|
||||
def parse_args() -> argparse.Namespace:
    """Build the exporter's CLI parser and parse ``sys.argv``."""
    parser = argparse.ArgumentParser(description="Export OHLCV candles from Binance")
    add = parser.add_argument  # shorthand; every option goes on the same parser
    add("--symbol", default="SOLUSDT", help="Binance symbol, e.g. SOLUSDT")
    add(
        "--interval",
        default="5m",
        choices=sorted(INTERVAL_MS.keys()),
        help="Binance interval (default: 5m)",
    )
    add(
        "--start",
        required=True,
        help="Start timestamp (ISO 8601, e.g. 2024-01-01 or 2024-01-01T00:00:00Z)",
    )
    add(
        "--end",
        required=True,
        help="End timestamp (ISO 8601, inclusive)",
    )
    add(
        "--output",
        type=Path,
        required=True,
        help="Path to output CSV file (directories created automatically)",
    )
    add(
        "--rate-limit-wait",
        type=float,
        default=0.2,
        help="Seconds to sleep between paginated requests (default: 0.2s)",
    )
    return parser.parse_args()
|
||||
|
||||
|
||||
def to_millis(value: str) -> int:
    """Convert an ISO-8601 timestamp string to UTC epoch milliseconds.

    Naive timestamps are interpreted as UTC; aware ones are converted to UTC.
    """
    stamp = pd.Timestamp(value)
    stamp = stamp.tz_localize("UTC") if stamp.tzinfo is None else stamp.tz_convert("UTC")
    return int(stamp.timestamp() * 1000)
|
||||
|
||||
|
||||
def fetch_chunk(symbol: str, interval: str, start_ms: int, end_ms: int) -> List[List[float]]:
    """Fetch a single page of klines (up to MAX_LIMIT rows) from Binance."""
    query = urlencode(
        {
            "symbol": symbol.upper(),
            "interval": interval,
            "startTime": start_ms,
            "endTime": end_ms,
            "limit": MAX_LIMIT,
        }
    )
    request = Request(
        f"{BINANCE_REST_BASE}?{query}",
        headers={"User-Agent": "binance-export/1.0"},
    )
    with urlopen(request, timeout=30) as resp:
        data = json.loads(resp.read())
    # On failure Binance returns a JSON object with "code"/"msg" instead of a list.
    if isinstance(data, dict) and data.get("code"):
        raise RuntimeError(f"Binance error {data['code']}: {data.get('msg')}")
    return data  # type: ignore[return-value]
|
||||
|
||||
|
||||
def main() -> int:
    """Entry point: download candles page-by-page and write them to CSV.

    Returns 0 on success; raises ValueError for bad arguments and
    RuntimeError when Binance returns no data.
    """
    args = parse_args()
    interval = args.interval
    if interval not in INTERVAL_MS:
        raise ValueError(f"Unsupported interval: {interval}")

    start_ms = to_millis(args.start)
    end_ms = to_millis(args.end)
    if end_ms <= start_ms:
        raise ValueError("End time must be after start time")

    interval_ms = INTERVAL_MS[interval]
    cursor = start_ms
    rows: List[List[float]] = []
    request_count = 0

    # Paginate: each response holds at most MAX_LIMIT candles, so advance the
    # cursor to one interval past the last candle's open time after each page.
    while cursor < end_ms:
        chunk = fetch_chunk(args.symbol, interval, cursor, end_ms)
        if not chunk:
            break
        rows.extend(chunk)
        request_count += 1
        last_open = chunk[-1][0]
        cursor = last_open + interval_ms
        if len(chunk) < MAX_LIMIT:
            # A short page means the requested range is exhausted.
            break
        time.sleep(args.rate_limit_wait)  # throttle to stay under API rate limits

    if not rows:
        raise RuntimeError("No data returned from Binance")

    # Raw kline column layout per Binance docs; several are dropped below.
    columns = [
        "open_time",
        "open",
        "high",
        "low",
        "close",
        "volume",
        "close_time",
        "quote_asset_volume",
        "number_of_trades",
        "taker_buy_base",
        "taker_buy_quote",
        "ignore",
    ]
    df = pd.DataFrame(rows, columns=columns)
    df = df.drop(columns=["close_time", "quote_asset_volume", "ignore"])
    # Convert epoch-ms open time to a naive (timezone-less) UTC timestamp column.
    df["timestamp"] = pd.to_datetime(df["open_time"], unit="ms", utc=True).dt.tz_convert(None)
    df = df[["timestamp", "open", "high", "low", "close", "volume", "number_of_trades", "taker_buy_base", "taker_buy_quote"]]
    # NOTE(review): only the OHLCV columns are cast to float here; the
    # number_of_trades / taker_* columns keep whatever type Binance returned
    # (strings for the taker volumes) — confirm this is intended for the CSV.
    df[["open", "high", "low", "close", "volume"]] = df[["open", "high", "low", "close", "volume"]].astype(float)

    args.output.parent.mkdir(parents=True, exist_ok=True)
    df.to_csv(args.output, index=False)

    # Summary: span is derived from the first/last candle timestamps.
    first = df.iloc[0].timestamp
    last = df.iloc[-1].timestamp
    duration_days = (last - first).days
    print(f"Saved {len(df):,} candles for {args.symbol} ({interval}) spanning ~{duration_days} days")
    print(f"Requests made: {request_count}")
    print(f"Output: {args.output}")
    return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Propagate main()'s return code as the process exit status.
    sys.exit(main())
|
||||
273
scripts/run_backtest_sweep.py
Normal file
273
scripts/run_backtest_sweep.py
Normal file
@@ -0,0 +1,273 @@
|
||||
#!/usr/bin/env python3
|
||||
"""Parameter sweep utility for Money Line backtests."""
|
||||
from __future__ import annotations
|
||||
|
||||
import argparse
|
||||
from itertools import product
|
||||
from multiprocessing import Pool
|
||||
from pathlib import Path
|
||||
from typing import List, Sequence
|
||||
|
||||
import sys
|
||||
import time
|
||||
|
||||
PROJECT_ROOT = Path(__file__).resolve().parents[1]
|
||||
if str(PROJECT_ROOT) not in sys.path:
|
||||
sys.path.append(str(PROJECT_ROOT))
|
||||
|
||||
import pandas as pd
|
||||
|
||||
from backtester.data_loader import load_csv
|
||||
from backtester.indicators.money_line import MoneyLineInputs
|
||||
from backtester.simulator import TradeConfig, simulate_money_line
|
||||
|
||||
|
||||
# Per-worker-process state, populated by _init_worker via the Pool initializer
# so large objects are shipped to each worker once rather than per task.
_DATA_SLICE = None
_TRADE_CONFIG = None
_GRID_KEYS: Sequence[str] = []
|
||||
|
||||
|
||||
def parse_float_list(value: str) -> List[float]:
    """Split a comma-separated string into floats, skipping empty entries."""
    return [float(token) for token in filter(None, value.split(","))]
|
||||
|
||||
|
||||
def parse_int_list(value: str) -> List[int]:
    """Split a comma-separated string into ints, skipping empty entries."""
    return [int(token) for token in filter(None, value.split(","))]
|
||||
|
||||
|
||||
def build_arg_parser() -> argparse.ArgumentParser:
    """Construct (but do not evaluate) the CLI parser for the sweep runner."""
    parser = argparse.ArgumentParser(description="Run Money Line parameter sweeps against a CSV dataset")
    add = parser.add_argument
    add("--csv", type=Path, required=True, help="Path to CSV with OHLCV data")
    add("--symbol", required=True, help="Symbol label (e.g., SOL-PERP)")
    add("--timeframe", default="5", help="Timeframe label (for reporting only)")
    add("--start", help="Optional ISO start timestamp filter", default=None)
    add("--end", help="Optional ISO end timestamp filter", default=None)
    add("--position-size", type=float, default=8100.0, help="Notional position size per trade")
    add("--max-bars", type=int, default=288, help="Max bars to hold a trade (default 288 = 24h on 5m)")
    add("--top", type=int, default=10, help="How many results to show")
    add(
        "--output",
        type=Path,
        help="Optional CSV to store every combination's metrics",
    )
    # Grid axes: each option below takes a comma-separated list of candidate
    # values; the sweep evaluates the Cartesian product of all of them.
    grid_axes = [
        ("--flip-thresholds", "0.5,0.6,0.7", "Comma separated flip threshold percentages"),
        ("--ma-gap-thresholds", "0.25,0.35,0.45", "Comma separated MA gap thresholds"),
        ("--momentum-adx", "22,24,26", "Comma separated ADX minimums for momentum dots"),
        ("--momentum-long-pos", "65,70", "Comma separated maximum price position for long momentum entries"),
        ("--momentum-short-pos", "30,35", "Comma separated minimum price position for short momentum entries"),
        ("--cooldown-bars", "2,3,4", "Comma separated cooldown bar counts between primary flips"),
        ("--momentum-spacing", "3,4,5", "Comma separated spacing (bars) between momentum signals"),
        ("--momentum-cooldown", "2,3", "Comma separated cooldown (bars) after primary signal before momentum allowed"),
    ]
    for flag, default, help_text in grid_axes:
        add(flag, default=default, help=help_text)
    add(
        "--workers",
        type=int,
        default=1,
        help="Number of worker processes (use >1 for multi-core sweeps)",
    )
    add(
        "--limit",
        type=int,
        help="Optional number of combinations to run (preview mode)",
    )
    return parser
|
||||
|
||||
|
||||
def run_sweep(args: argparse.Namespace) -> pd.DataFrame:
    """Evaluate the full parameter grid and return one metrics row per combo.

    Loads the dataset once, builds the Cartesian product of all grid axes,
    then evaluates combos either serially or via a multiprocessing Pool.
    """
    data_slice = load_csv(args.csv, args.symbol, args.timeframe, start=args.start, end=args.end)
    trade_config = TradeConfig(position_size=args.position_size, max_bars_per_trade=args.max_bars)

    # One entry per swept MoneyLineInputs field; values come from the CLI lists.
    grids = {
        "flip_threshold_percent": parse_float_list(args.flip_thresholds),
        "ma_gap_threshold": parse_float_list(args.ma_gap_thresholds),
        "momentum_min_adx": parse_float_list(args.momentum_adx),
        "momentum_long_max_pos": parse_float_list(args.momentum_long_pos),
        "momentum_short_min_pos": parse_float_list(args.momentum_short_pos),
        "cooldown_bars": parse_int_list(args.cooldown_bars),
        "momentum_spacing": parse_int_list(args.momentum_spacing),
        "momentum_cooldown": parse_int_list(args.momentum_cooldown),
    }

    combos = list(product(*grids.values()))
    total_combos = len(combos)
    print(f"Evaluating {total_combos} combinations...")

    # --limit trims the grid for a quick preview run.
    if args.limit is not None and args.limit > 0 and args.limit < total_combos:
        combos = combos[: args.limit]
        print(f"Preview mode: running first {len(combos)} combos (out of {total_combos})")

    keys = list(grids.keys())
    worker_count = args.workers if args.workers and args.workers > 0 else 1
    total_to_run = len(combos)
    progress = ProgressBar(total=total_to_run)

    if total_to_run == 0:
        print("No combinations to evaluate with current configuration.")
        # NOTE: this empty frame has no columns; callers must guard before
        # sorting/selecting metric columns.
        return pd.DataFrame()

    if worker_count <= 1:
        # Serial path: evaluate combos in-process.
        records = []
        for idx, combo in enumerate(combos, start=1):
            records.append(_evaluate_combo(combo, keys, data_slice, trade_config))
            progress.update(idx)
    else:
        print(f"Using {worker_count} worker processes")
        # The initializer ships the (potentially large) data slice to each
        # worker once instead of pickling it per task.
        with Pool(
            processes=worker_count,
            initializer=_init_worker,
            initargs=(data_slice, trade_config, keys),
        ) as pool:
            records = []
            # imap_unordered: results arrive as they complete; ordering is
            # irrelevant because every record carries its own params.
            for idx, record in enumerate(pool.imap_unordered(_worker_eval, combos), start=1):
                records.append(record)
                progress.update(idx)

    progress.finish()

    elapsed = progress.elapsed
    combos_run = len(combos)
    avg_time = elapsed / combos_run if combos_run else 0
    print(
        f"Processed {combos_run} combos in {elapsed:.1f}s (avg {avg_time:.2f}s per combo)"
    )
    if combos_run < total_combos:
        # Extrapolate preview timing to the full grid.
        projected = avg_time * total_combos
        print(
            f"Estimated full sweep ({total_combos} combos) would take ~{projected/60:.1f} minutes with current settings"
        )

    return pd.DataFrame(records)
|
||||
|
||||
|
||||
def _init_worker(data_slice, trade_config, keys):
    """Pool initializer: stash shared sweep state in worker-process globals."""
    global _DATA_SLICE, _TRADE_CONFIG, _GRID_KEYS
    _DATA_SLICE, _TRADE_CONFIG, _GRID_KEYS = data_slice, trade_config, keys
|
||||
|
||||
|
||||
def _worker_eval(combo):
    # Runs inside a pool worker; reads the state installed by _init_worker.
    return _evaluate_combo(combo, _GRID_KEYS, _DATA_SLICE, _TRADE_CONFIG)
|
||||
|
||||
|
||||
def _evaluate_combo(combo, keys, data_slice, trade_config):
    """Backtest a single parameter combination and return params + metrics."""
    params = dict(zip(keys, combo))
    result = simulate_money_line(
        data_slice.data,
        data_slice.symbol,
        inputs=MoneyLineInputs(**params),
        config=trade_config,
    )
    trades = result.trades
    metrics = {
        "trades": len(trades),
        "total_pnl": result.total_pnl,
        "win_rate": result.win_rate * 100,
        "avg_pnl": result.average_pnl,
        "max_drawdown": result.max_drawdown,
        "profit_factor": _profit_factor(trades),
    }
    # Parameter columns first, metric columns after — matches the CSV layout.
    return {**params, **metrics}
|
||||
|
||||
|
||||
class ProgressBar:
    """Minimal in-place text progress bar written to stdout."""

    def __init__(self, total: int, bar_length: int = 40) -> None:
        # Clamp negative totals to zero so update() can safely no-op.
        self.total = max(total, 0)
        self.bar_length = bar_length
        self.start_time = time.time()
        self._done = False

    def update(self, count: int) -> None:
        """Redraw the bar for *count* completed items (no-op when total is 0)."""
        if self.total <= 0:
            return
        count = min(count, self.total)
        fraction = count / self.total
        filled = int(self.bar_length * fraction)
        bar = "#" * filled + "-" * (self.bar_length - filled)
        elapsed = time.time() - self.start_time
        per_item = elapsed / count if count else 0
        eta = per_item * (self.total - count) if per_item else 0
        sys.stdout.write(
            f"\r[{bar}] {fraction*100:5.1f}% ({count}/{self.total}) | "
            f"Elapsed {elapsed:6.1f}s | ETA {eta:6.1f}s"
        )
        sys.stdout.flush()
        if count >= self.total:
            self._done = True
            sys.stdout.write("\n")

    def finish(self) -> None:
        """Drive the bar to 100% if it has not already completed."""
        if not self._done and self.total > 0:
            self.update(self.total)

    @property
    def elapsed(self) -> float:
        """Seconds elapsed since construction."""
        return time.time() - self.start_time
|
||||
|
||||
|
||||
def _profit_factor(trades) -> float:
    """Return gross profit / gross loss.

    Yields ``inf`` when there are gains but no losses, and ``0.0`` when
    there are neither gains nor losses.
    """
    total_gain = 0.0
    total_loss = 0.0
    for trade in trades:
        pnl = trade.realized_pnl
        if pnl > 0:
            total_gain += pnl
        elif pnl < 0:
            total_loss -= pnl  # accumulate losses as a positive magnitude
    if total_loss == 0:
        return float("inf") if total_gain > 0 else 0.0
    return total_gain / total_loss
|
||||
|
||||
|
||||
def main() -> int:
    """CLI entry point: run the sweep, print top results, optionally save a CSV.

    Returns 0 on success, 1 when the sweep produced no results.
    """
    parser = build_arg_parser()
    args = parser.parse_args()
    df = run_sweep(args)
    # run_sweep returns a column-less empty frame when there is nothing to
    # evaluate; sorting that by "total_pnl" would raise KeyError.
    if df.empty:
        print("No results to report.")
        return 1
    df = df.sort_values("total_pnl", ascending=False)

    top_n = df.head(args.top)
    print("\n=== TOP RESULTS ===")
    # Everything that is not a metric column is a swept parameter.
    metric_cols = ["trades", "total_pnl", "win_rate", "avg_pnl", "max_drawdown", "profit_factor"]
    for _, row in top_n.iterrows():
        params = row.drop(metric_cols).to_dict()
        print(
            f"PnL ${row['total_pnl']:.2f} | Trades {row['trades']} | Win {row['win_rate']:.2f}% | "
            f"DD ${row['max_drawdown']:.2f} | PF {row['profit_factor']:.2f} | params {params}"
        )

    if args.output:
        args.output.parent.mkdir(parents=True, exist_ok=True)
        df.to_csv(args.output, index=False)
        print(f"\nSaved all {len(df)} results to {args.output}")

    return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Propagate main()'s return code as the process exit status.
    sys.exit(main())
|
||||
Reference in New Issue
Block a user