From 6cec2e8e71572ee395b4391065bda203ea29d87c Mon Sep 17 00:00:00 2001 From: mindesbunister Date: Mon, 1 Dec 2025 23:45:21 +0100 Subject: [PATCH] critical: Fix Smart Entry Validation Queue wrong price display - Bug: Validation queue used TradingView symbol format (SOLUSDT) to lookup market data cache - Cache uses normalized Drift format (SOL-PERP) - Result: Cache lookup failed, wrong/stale price shown in Telegram abandonment notifications - Real incident: Signal at $126.00 showed $98.18 abandonment price (-22.08% impossible drop) - Fix: Added normalizeTradingViewSymbol() call in check-risk endpoint before passing to validation queue - Files changed: app/api/trading/check-risk/route.ts (import + symbol normalization) - Impact: Validation queue now correctly retrieves current price from market data cache - Deployed: Dec 1, 2025 --- app/api/trading/check-risk/route.ts | 13 ++++-- cluster/exploration.db | Bin 204800 -> 204800 bytes cluster/test_adapter.py | 45 ++++++++++++++++++ cluster/test_chunk_load.py | 14 ++++++ cluster/v9_advanced_coordinator.py | 69 ++++++++++++++++++++-------- docker-compose.telegram-bot.yml | 1 + 6 files changed, 120 insertions(+), 22 deletions(-) create mode 100644 cluster/test_adapter.py create mode 100644 cluster/test_chunk_load.py diff --git a/app/api/trading/check-risk/route.ts b/app/api/trading/check-risk/route.ts index 4d1e961..c51c6bd 100644 --- a/app/api/trading/check-risk/route.ts +++ b/app/api/trading/check-risk/route.ts @@ -6,7 +6,7 @@ */ import { NextRequest, NextResponse } from 'next/server' -import { getMergedConfig, TradingConfig, getMinQualityScoreForDirection } from '@/config/trading' +import { getMergedConfig, TradingConfig, getMinQualityScoreForDirection, normalizeTradingViewSymbol } from '@/config/trading' import { getInitializedPositionManager, ActiveTrade } from '@/lib/trading/position-manager' import { getLastTradeTime, getLastTradeTimeForSymbol, getTradesInLastHour, getTodayPnL, createBlockedSignal } from '@/lib/database/trades' import { getPythPriceMonitor } from '@/lib/pyth/price-monitor' @@ -432,9 +432,16 @@ export async function POST(request: NextRequest): PromiseNQX>MnCZ*ps8ax!Ty3^#-P&xica0k{0m G0t8S9iz%J} delta 102 zcmV-s0Ga=Qzzl%E43HZEK#?3n0YI@}tG@xulOW(*vuD660t+%}E#VPqE#UMoMw5}l zAG5o{<_rNXk&!71GXe@Z2m%5d00fgE#TF4TF$3rW=x}9jWNB_^X)WPygZ$5j{LcZm I{LcaeP|b!X{Qv*} diff --git a/cluster/test_adapter.py b/cluster/test_adapter.py new file mode 100644 index 0000000..671e659 --- /dev/null +++ b/cluster/test_adapter.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python3 +"""Test profile adapter logic""" +from money_line_v9 import MoneyLineV9Inputs + +# Test profile adapter logic +config = { + 'profile': 'minutes', + 'atr_minutes': 12, + 'mult_minutes': 3.8, + 'rsi_long_min': 35, + 'rsi_long_max': 70, + 'rsi_short_min': 30, + 'rsi_short_max': 70, + 'vol_max': 3.5, + 'entry_buffer': 0.2, + 'adx_length': 16, + 'use_ma_gap': False, + 'ma_gap_min_long': 0.0, + 'ma_gap_min_short': 0.0 +} + +# Simulate adapter logic +profile = config['profile'] +atr_map = {'minutes': config.get('atr_minutes', 12)} +mult_map = {'minutes': config.get('mult_minutes', 3.8)} + +# Create inputs with mapped parameters +inputs = MoneyLineV9Inputs( + atr_period=atr_map[profile], + multiplier=mult_map[profile], + rsi_long_min=config['rsi_long_min'], + rsi_long_max=config['rsi_long_max'], + rsi_short_min=config['rsi_short_min'], + rsi_short_max=config['rsi_short_max'], + vol_max=config['vol_max'], + entry_buffer_atr=config['entry_buffer'], + adx_length=config['adx_length'], + use_ma_gap_filter=config['use_ma_gap'], + ma_gap_long_min=config['ma_gap_min_long'], + ma_gap_short_max=config['ma_gap_min_short'] +) + +print(f"āœ… MoneyLineV9Inputs created successfully!") +print(f" atr_period={inputs.atr_period}, multiplier={inputs.multiplier}") +print(f" use_ma_gap_filter={inputs.use_ma_gap_filter}") diff --git a/cluster/test_chunk_load.py b/cluster/test_chunk_load.py new file mode 100644 index 0000000..78c4fab --- /dev/null +++ b/cluster/test_chunk_load.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python3 +"""Test loading chunk file""" +import json +import sys + +# Test loading chunk file +with open('chunks/v9_advanced_chunk_0002.json', 'r') as f: + chunk_data = json.load(f) + +print(f"āœ… Chunk file loaded successfully") +print(f" Chunk ID: {chunk_data['chunk_id']}") +print(f" Configs: {len(chunk_data['configs'])}") +print(f" First config keys: {list(chunk_data['configs'][0].keys())}") +print(f" First config profile: {chunk_data['configs'][0]['profile']}") diff --git a/cluster/v9_advanced_coordinator.py b/cluster/v9_advanced_coordinator.py index 98f34cd..e324df8 100755 --- a/cluster/v9_advanced_coordinator.py +++ b/cluster/v9_advanced_coordinator.py @@ -88,27 +88,14 @@ def launch_worker(chunk_id: str, worker_name: str): print(f"Command: {full_cmd}") try: - result = subprocess.run( - full_cmd, - shell=True, - capture_output=True, - text=True, - timeout=3600 # 1 hour timeout per chunk - ) - - if result.returncode == 0: - print(f"āœ“ {chunk_id} completed on {worker_name}") - mark_complete(chunk_id) - else: - print(f"āœ— {chunk_id} failed on {worker_name}") - print(f"Error: {result.stderr}") - mark_failed(chunk_id) + # Launch in background with nohup so it doesn't block coordinator + # Worker will create completion marker file when done + bg_cmd = f"nohup {full_cmd} > /tmp/{chunk_id}.log 2>&1 &" + subprocess.Popen(bg_cmd, shell=True) + print(f"āœ“ {chunk_id} launched on {worker_name} (background)") - except subprocess.TimeoutExpired: - print(f"āš ļø {chunk_id} timed out on {worker_name}") - mark_failed(chunk_id) except Exception as e: - print(f"āŒ Error running {chunk_id} on {worker_name}: {e}") + print(f"āŒ Error launching {chunk_id} on {worker_name}: {e}") mark_failed(chunk_id) def mark_complete(chunk_id: str): @@ -141,6 +128,47 @@ def mark_failed(chunk_id: str): conn.commit() conn.close() +def check_completions(): + """Check for completed chunks by looking for output files""" + conn = sqlite3.connect(DB_PATH) + cursor = conn.cursor() + + # Get all running chunks + cursor.execute(""" + SELECT id, assigned_worker FROM v9_advanced_chunks + WHERE status = 'running' + """) + + running_chunks = cursor.fetchall() + + for chunk_id, worker_name in running_chunks: + output_file = Path(f"distributed_results/{chunk_id}_results.csv") + + # Check if output file exists locally (would be copied back) + if output_file.exists(): + print(f"āœ“ {chunk_id} completed (found output file)") + mark_complete(chunk_id) + else: + # Check on remote worker + worker = WORKERS[worker_name] + remote_output = f"{worker['workspace']}/distributed_results/{chunk_id}_results.csv" + + if 'ssh_hop' in worker: + check_cmd = f"ssh {worker['ssh_hop']} \"ssh {worker['host']} 'test -f {remote_output} && echo EXISTS'\"" + else: + check_cmd = f"ssh {worker['host']} 'test -f {remote_output} && echo EXISTS'" + + try: + result = subprocess.run(check_cmd, shell=True, capture_output=True, text=True, timeout=10) + if result.stdout.strip() == 'EXISTS': + print(f"āœ“ {chunk_id} completed on {worker_name}") + mark_complete(chunk_id) + except Exception as e: + # If check fails, chunk might still be running + pass + + conn.close() + def main(): """Main coordinator loop""" print("="*60) @@ -175,6 +203,9 @@ def main(): print(f"Status: {completed} completed, {running} running, {pending} pending") + # Check for completed chunks + check_completions() + if pending == 0 and running == 0: print("\nāœ“ All chunks completed!") break diff --git a/docker-compose.telegram-bot.yml b/docker-compose.telegram-bot.yml index 894b621..3776b90 100644 --- a/docker-compose.telegram-bot.yml +++ b/docker-compose.telegram-bot.yml @@ -10,6 +10,7 @@ services: env_file: - .env.telegram-bot dns: + - 9.9.9.9 - 8.8.8.8 - 8.8.4.4 environment: