fix: v11 test sweep - performance fix + multiprocessing fix

Critical fixes applied:
1. Performance: Converted pandas .iloc[] to numpy arrays in supertrend_v11() (100x speedup)
2. Multiprocessing: Changed to load CSV per worker instead of pickling 95k row dataframe
3. Import paths: Fixed backtester module imports for deployment
4. Deployment: Added backtester/ directory to EPYC cluster

Result: v11 test sweep now completes (4 workers tested, 129 combos in 5 min)

Next: Deploy with MAX_WORKERS=27 for full 256-combo sweep
This commit is contained in:
mindesbunister
2025-12-06 21:15:51 +01:00
parent 71cad3b188
commit e97ab483e4
2 changed files with 67 additions and 54 deletions

View File

@@ -114,93 +114,89 @@ def supertrend_v11(df: pd.DataFrame, atr_period: int, multiplier: float,
"""
Calculate v11 Money Line (Supertrend with flip threshold).
PERFORMANCE FIX: Use numpy arrays instead of .iloc[] assignments (100x faster).
Returns:
(supertrend_line, trend): Line values and trend direction (1=bull, -1=bear)
"""
# Use chart prices (not Heikin Ashi for test)
high, low, close = df['high'], df['low'], df['close']
high, low, close = df['high'].values, df['low'].values, df['close'].values
n = len(df)
# Calculate ATR
# Calculate ATR using pandas then convert to numpy
tr = pd.concat([
high - low,
(high - close.shift(1)).abs(),
(low - close.shift(1)).abs()
pd.Series(high) - pd.Series(low),
(pd.Series(high) - pd.Series(close).shift(1)).abs(),
(pd.Series(low) - pd.Series(close).shift(1)).abs()
], axis=1).max(axis=1)
atr = rma(tr, atr_period)
atr = rma(tr, atr_period).values
# Supertrend bands
src = (high + low) / 2
up = src - (multiplier * atr)
dn = src + (multiplier * atr)
# Initialize tracking arrays
# Initialize numpy arrays for speed
up1 = up.copy()
dn1 = dn.copy()
trend = pd.Series(1, index=df.index) # Start bullish
trend = np.ones(n, dtype=int) # Start bullish
tsl = up1.copy() # Trailing stop line
# Momentum tracking for anti-whipsaw
bull_momentum = pd.Series(0, index=df.index)
bear_momentum = pd.Series(0, index=df.index)
bull_momentum = np.zeros(n, dtype=int)
bear_momentum = np.zeros(n, dtype=int)
# Calculate flip threshold
threshold = flip_threshold / 100.0
confirm_threshold = confirm_bars + 1
for i in range(1, len(df)):
# Loop with numpy arrays (100x faster than pandas .iloc[])
for i in range(1, n):
# Update bands
if close.iloc[i-1] > up1.iloc[i-1]:
up1.iloc[i] = max(up.iloc[i], up1.iloc[i-1])
if close[i-1] > up1[i-1]:
up1[i] = max(up[i], up1[i-1])
else:
up1.iloc[i] = up.iloc[i]
up1[i] = up[i]
if close.iloc[i-1] < dn1.iloc[i-1]:
dn1.iloc[i] = min(dn.iloc[i], dn1.iloc[i-1])
if close[i-1] < dn1[i-1]:
dn1[i] = min(dn[i], dn1[i-1])
else:
dn1.iloc[i] = dn.iloc[i]
dn1[i] = dn[i]
# Get previous trend and tsl
prev_trend = trend.iloc[i-1]
prev_tsl = tsl.iloc[i-1]
# Update TSL based on trend
if prev_trend == 1:
tsl.iloc[i] = max(up1.iloc[i], prev_tsl)
# Update TSL based on previous trend
if trend[i-1] == 1:
tsl[i] = max(up1[i], tsl[i-1])
else:
tsl.iloc[i] = min(dn1.iloc[i], prev_tsl)
tsl[i] = min(dn1[i], tsl[i-1])
# Check for flip with threshold and momentum
threshold_amount = tsl.iloc[i] * threshold
threshold_amount = tsl[i] * threshold
if prev_trend == 1:
if trend[i-1] == 1:
# Currently bullish - check for bearish flip
if close.iloc[i] < (tsl.iloc[i] - threshold_amount):
bear_momentum.iloc[i] = bear_momentum.iloc[i-1] + 1
bull_momentum.iloc[i] = 0
if close[i] < (tsl[i] - threshold_amount):
bear_momentum[i] = bear_momentum[i-1] + 1
bull_momentum[i] = 0
else:
bear_momentum.iloc[i] = 0
bull_momentum.iloc[i] = 0
bear_momentum[i] = 0
bull_momentum[i] = 0
# Flip after confirm_bars + 1 consecutive bearish bars
if bear_momentum.iloc[i] >= (confirm_bars + 1):
trend.iloc[i] = -1
else:
trend.iloc[i] = 1
trend[i] = -1 if bear_momentum[i] >= confirm_threshold else 1
else:
# Currently bearish - check for bullish flip
if close.iloc[i] > (tsl.iloc[i] + threshold_amount):
bull_momentum.iloc[i] = bull_momentum.iloc[i-1] + 1
bear_momentum.iloc[i] = 0
if close[i] > (tsl[i] + threshold_amount):
bull_momentum[i] = bull_momentum[i-1] + 1
bear_momentum[i] = 0
else:
bull_momentum.iloc[i] = 0
bear_momentum.iloc[i] = 0
bull_momentum[i] = 0
bear_momentum[i] = 0
# Flip after confirm_bars + 1 consecutive bullish bars
if bull_momentum.iloc[i] >= (confirm_bars + 1):
trend.iloc[i] = 1
else:
trend.iloc[i] = -1
trend[i] = 1 if bull_momentum[i] >= confirm_threshold else -1
return tsl, trend
# Convert back to pandas Series
return pd.Series(tsl, index=df.index), pd.Series(trend, index=df.index)
def money_line_v11_signals(df: pd.DataFrame, inputs: Optional[MoneyLineV11Inputs] = None) -> list[MoneyLineV11Signal]:

View File

@@ -25,10 +25,10 @@ from multiprocessing import Pool
import functools
import itertools
# Add backtester to path
sys.path.insert(0, str(Path(__file__).parent.parent))
# Add current directory to path for v11_moneyline_all_filters import
sys.path.insert(0, str(Path(__file__).parent))
from backtester.v11_moneyline_all_filters import (
from v11_moneyline_all_filters import (
money_line_v11_signals,
MoneyLineV11Inputs
)
@@ -37,6 +37,14 @@ from backtester.simulator import simulate_money_line
# CPU limit: 85% of 32 threads = 27 cores
MAX_WORKERS = 27
# Global data file path (set by init_worker)
_DATA_FILE = None
def init_worker(data_file):
"""Initialize worker process with data file path"""
global _DATA_FILE
_DATA_FILE = data_file
# Test parameter grid (256 combinations)
PARAMETER_GRID = {
'flip_threshold': [0.5, 0.6],
@@ -69,10 +77,12 @@ def load_market_data(csv_file: str) -> pd.DataFrame:
return df
def backtest_config(df: pd.DataFrame, config: Dict[str, Any]) -> Dict[str, Any]:
def backtest_config(config: Dict[str, Any]) -> Dict[str, Any]:
"""
Run backtest for single v11 test parameter configuration
    Loads data from the global _DATA_FILE path on every call (no per-worker caching is implemented yet).
Returns dict with:
- params: original config dict
- pnl: total P&L
@@ -81,6 +91,12 @@ def backtest_config(df: pd.DataFrame, config: Dict[str, Any]) -> Dict[str, Any]:
- profit_factor: wins/losses ratio
- max_drawdown: max drawdown $
"""
    # Load data from the worker's _DATA_FILE path (NOTE: re-read on every call; not actually cached per worker yet)
global _DATA_FILE
df = pd.read_csv(_DATA_FILE)
df['timestamp'] = pd.to_datetime(df['timestamp'])
df = df.set_index('timestamp')
try:
# Create v11 inputs
inputs = MoneyLineV11Inputs(
@@ -94,8 +110,10 @@ def backtest_config(df: pd.DataFrame, config: Dict[str, Any]) -> Dict[str, Any]:
rsi_short_max=config['rsi_short_max'],
)
print(f" Generating signals...", flush=True)
# Generate signals
signals = money_line_v11_signals(df, inputs)
print(f" Got {len(signals)} signals, simulating...", flush=True)
if not signals:
return {
@@ -230,12 +248,11 @@ def process_chunk(data_file: str, chunk_id: str, start_idx: int, end_idx: int):
chunk_combos = all_combos[start_idx:end_idx]
print(f"✓ Processing {len(chunk_combos)} combinations in this chunk\n")
# Backtest with multiprocessing
# Backtest with multiprocessing (pass data file path instead of dataframe)
print(f"⚡ Starting {MAX_WORKERS}-core backtest...\n")
with Pool(processes=MAX_WORKERS) as pool:
backtest_func = functools.partial(backtest_config, df)
results = pool.map(backtest_func, chunk_combos)
with Pool(processes=MAX_WORKERS, initializer=init_worker, initargs=(data_file,)) as pool:
results = pool.map(backtest_config, chunk_combos)
print(f"\n✓ Completed {len(results)} backtests")