fix: v11 test sweep - performance and multiprocessing fixes

Critical fixes applied:
1. Performance: Replaced per-row pandas .iloc[] access with numpy arrays in supertrend_v11() (100x speedup; see the sketch after this list)
2. Multiprocessing: Workers now load the CSV themselves instead of receiving the pickled 95k-row dataframe
3. Import paths: Fixed backtester module imports for deployment
4. Deployment: Added backtester/ directory to EPYC cluster
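
For context on fix 1: supertrend_v11() is in the other changed file, so its code is not in
this diff. The sketch below only illustrates the general pattern behind the speedup, using a
hypothetical running-max computation rather than the actual supertrend logic:

    import numpy as np
    import pandas as pd

    def slow_running_max(df: pd.DataFrame) -> list:
        # Per-row .iloc[] access: every call re-resolves the index,
        # so a 95k-row loop pays that overhead 95k times.
        out = []
        for i in range(len(df)):
            v = df['high'].iloc[i]
            out.append(v if not out else max(out[-1], v))
        return out

    def fast_running_max(df: pd.DataFrame) -> np.ndarray:
        # Pull the column out as a numpy array once, then operate on
        # plain C-backed floats.
        high = df['high'].to_numpy()
        return np.maximum.accumulate(high)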

Result: v11 test sweep now completes (4 workers tested, 129 combos in 5 min)

Next: Deploy with MAX_WORKERS=27 for full 256-combo sweep
Author: mindesbunister
Date:   2025-12-06 21:15:51 +01:00
Parent: 71cad3b188
Commit: e97ab483e4
2 changed files with 67 additions and 54 deletions

@@ -25,10 +25,10 @@ from multiprocessing import Pool
 import functools
 import itertools
 
-# Add backtester to path
-sys.path.insert(0, str(Path(__file__).parent.parent))
+# Add current directory to path for v11_moneyline_all_filters import
+sys.path.insert(0, str(Path(__file__).parent))
 
-from backtester.v11_moneyline_all_filters import (
+from v11_moneyline_all_filters import (
     money_line_v11_signals,
     MoneyLineV11Inputs
 )
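
Regarding fix 3: one way to keep this import working in both layouts (repo checkout with a
backtester/ package, and flat deployment next to the module) is a guarded import. This is a
sketch of a common pattern, not what the commit does:

    import sys
    from pathlib import Path

    sys.path.insert(0, str(Path(__file__).parent))
    try:
        # Repo layout: module lives inside the backtester/ package
        from backtester.v11_moneyline_all_filters import money_line_v11_signals, MoneyLineV11Inputs
    except ImportError:
        # Flat deployment: module sits next to this script
        from v11_moneyline_all_filters import money_line_v11_signals, MoneyLineV11Inputs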
@@ -37,6 +37,14 @@ from backtester.simulator import simulate_money_line
 # CPU limit: 85% of 32 threads = 27 cores
 MAX_WORKERS = 27
 
+# Global data file path (set by init_worker)
+_DATA_FILE = None
+
+def init_worker(data_file):
+    """Initialize worker process with data file path"""
+    global _DATA_FILE
+    _DATA_FILE = data_file
+
 # Test parameter grid (256 combinations)
 PARAMETER_GRID = {
     'flip_threshold': [0.5, 0.6],
@@ -69,10 +77,12 @@ def load_market_data(csv_file: str) -> pd.DataFrame:
     return df
 
-def backtest_config(df: pd.DataFrame, config: Dict[str, Any]) -> Dict[str, Any]:
+def backtest_config(config: Dict[str, Any]) -> Dict[str, Any]:
     """
     Run backtest for a single v11 test parameter configuration
 
+    Loads data from the global _DATA_FILE path inside the worker.
+
     Returns dict with:
     - params: original config dict
     - pnl: total P&L
@@ -81,6 +91,12 @@ def backtest_config(df: pd.DataFrame, config: Dict[str, Any]) -> Dict[str, Any]:
     - profit_factor: wins/losses ratio
     - max_drawdown: max drawdown $
     """
+    # Load data from _DATA_FILE (re-read on each call; avoids pickling the dataframe)
+    global _DATA_FILE
+    df = pd.read_csv(_DATA_FILE)
+    df['timestamp'] = pd.to_datetime(df['timestamp'])
+    df = df.set_index('timestamp')
+
     try:
         # Create v11 inputs
         inputs = MoneyLineV11Inputs(
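
Note that as written, each call to backtest_config() re-reads the CSV. If the repeated read
ever matters, a true per-worker cache is a small variant; this sketch reuses the module's
_DATA_FILE global and pandas import, and is not part of this commit:

    _DF_CACHE = None  # module-level, so it lives once per worker process

    def _get_df():
        global _DF_CACHE
        if _DF_CACHE is None:
            _DF_CACHE = pd.read_csv(_DATA_FILE)
            _DF_CACHE['timestamp'] = pd.to_datetime(_DF_CACHE['timestamp'])
            _DF_CACHE = _DF_CACHE.set_index('timestamp')
        return _DF_CACHE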
@@ -94,8 +110,10 @@ def backtest_config(df: pd.DataFrame, config: Dict[str, Any]) -> Dict[str, Any]:
             rsi_short_max=config['rsi_short_max'],
         )
 
+        print(f" Generating signals...", flush=True)
         # Generate signals
         signals = money_line_v11_signals(df, inputs)
+        print(f" Got {len(signals)} signals, simulating...", flush=True)
 
         if not signals:
             return {
@@ -230,12 +248,11 @@ def process_chunk(data_file: str, chunk_id: str, start_idx: int, end_idx: int):
     chunk_combos = all_combos[start_idx:end_idx]
     print(f"✓ Processing {len(chunk_combos)} combinations in this chunk\n")
 
-    # Backtest with multiprocessing
+    # Backtest with multiprocessing (pass data file path instead of dataframe)
     print(f"⚡ Starting {MAX_WORKERS}-core backtest...\n")
-    with Pool(processes=MAX_WORKERS) as pool:
-        backtest_func = functools.partial(backtest_config, df)
-        results = pool.map(backtest_func, chunk_combos)
+    with Pool(processes=MAX_WORKERS, initializer=init_worker, initargs=(data_file,)) as pool:
+        results = pool.map(backtest_config, chunk_combos)
 
     print(f"\n✓ Completed {len(results)} backtests")
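
The core of fix 2, distilled into a self-contained example with toy function names: the old
functools.partial(backtest_config, df) bound the dataframe into every task message, so it was
pickled per task. With initializer/initargs, only the short path string crosses the process
boundary, once per worker at pool startup:

    from multiprocessing import Pool

    _DATA_FILE = None

    def init_worker(data_file):
        # Runs once per worker process at pool startup.
        global _DATA_FILE
        _DATA_FILE = data_file

    def work(task_id):
        # Each task reaches the path via the worker-local global.
        return f"task {task_id} using {_DATA_FILE}"

    if __name__ == "__main__":
        with Pool(processes=4, initializer=init_worker, initargs=("data.csv",)) as pool:
            print(pool.map(work, range(8)))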