critical: Fix Smart Entry Validation Queue wrong price display
- Bug: The validation queue used the TradingView symbol format (SOLUSDT) to look up the market data cache
- The cache is keyed by the normalized Drift format (SOL-PERP)
- Result: The cache lookup failed, so a wrong/stale price was shown in Telegram abandonment notifications
- Real incident: A signal at $126.00 showed a $98.18 abandonment price, an impossible -22.08% drop
- Fix: Added a normalizeTradingViewSymbol() call in the check-risk endpoint before the symbol is passed to the validation queue (the mapping is sketched below)
- Files changed: app/api/trading/check-risk/route.ts (import + symbol normalization)
- Impact: The validation queue now correctly retrieves the current price from the market data cache
- Deployed: Dec 1, 2025
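For illustration, the shape of the fix: the cache is keyed by Drift-style symbols, so the TradingView ticker has to be normalized before the lookup. Below is a minimal Python sketch of that mapping; the real helper is normalizeTradingViewSymbol() in the TypeScript route, and the exact suffix rules here are assumptions.

# Hedged sketch of the normalization step; the suffix handling is assumed.
def normalize_tradingview_symbol(tv_symbol: str) -> str:
    """Map a TradingView ticker like 'SOLUSDT' to Drift's 'SOL-PERP'."""
    for suffix in ("USDT", "USD"):
        if tv_symbol.endswith(suffix):
            return f"{tv_symbol[:-len(suffix)]}-PERP"
    return tv_symbol  # assume it is already normalized

assert normalize_tradingview_symbol("SOLUSDT") == "SOL-PERP"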
cluster/test_adapter.py (new file, 45 lines)
@@ -0,0 +1,45 @@
#!/usr/bin/env python3
"""Test profile adapter logic"""
from money_line_v9 import MoneyLineV9Inputs

# Test profile adapter logic
config = {
    'profile': 'minutes',
    'atr_minutes': 12,
    'mult_minutes': 3.8,
    'rsi_long_min': 35,
    'rsi_long_max': 70,
    'rsi_short_min': 30,
    'rsi_short_max': 70,
    'vol_max': 3.5,
    'entry_buffer': 0.2,
    'adx_length': 16,
    'use_ma_gap': False,
    'ma_gap_min_long': 0.0,
    'ma_gap_min_short': 0.0
}

# Simulate adapter logic
profile = config['profile']
atr_map = {'minutes': config.get('atr_minutes', 12)}
mult_map = {'minutes': config.get('mult_minutes', 3.8)}

# Create inputs with mapped parameters
inputs = MoneyLineV9Inputs(
    atr_period=atr_map[profile],
    multiplier=mult_map[profile],
    rsi_long_min=config['rsi_long_min'],
    rsi_long_max=config['rsi_long_max'],
    rsi_short_min=config['rsi_short_min'],
    rsi_short_max=config['rsi_short_max'],
    vol_max=config['vol_max'],
    entry_buffer_atr=config['entry_buffer'],
    adx_length=config['adx_length'],
    use_ma_gap_filter=config['use_ma_gap'],
    ma_gap_long_min=config['ma_gap_min_long'],
    ma_gap_short_max=config['ma_gap_min_short']
)

print(f"✅ MoneyLineV9Inputs created successfully!")
print(f"   atr_period={inputs.atr_period}, multiplier={inputs.multiplier}")
print(f"   use_ma_gap_filter={inputs.use_ma_gap_filter}")
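A note on the adapter pattern exercised above: the lookup maps resolve the active profile to its tuned parameters. Below is a hedged sketch of how those maps would extend past the single 'minutes' profile; the other profile names and defaults are invented for illustration.

# Hedged sketch; the 'hours'/'days' keys and their defaults are hypothetical.
config = {'profile': 'minutes', 'atr_minutes': 12, 'mult_minutes': 3.8}
atr_map = {
    'minutes': config.get('atr_minutes', 12),
    'hours': config.get('atr_hours', 14),   # hypothetical profile
    'days': config.get('atr_days', 20),     # hypothetical profile
}
atr_period = atr_map[config['profile']]
print(f"profile={config['profile']} -> atr_period={atr_period}")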
cluster/test_chunk_load.py (new file, 14 lines)
@@ -0,0 +1,14 @@
#!/usr/bin/env python3
"""Test loading chunk file"""
import json
import sys

# Test loading chunk file
with open('chunks/v9_advanced_chunk_0002.json', 'r') as f:
    chunk_data = json.load(f)

print(f"✅ Chunk file loaded successfully")
print(f"   Chunk ID: {chunk_data['chunk_id']}")
print(f"   Configs: {len(chunk_data['configs'])}")
print(f"   First config keys: {list(chunk_data['configs'][0].keys())}")
print(f"   First config profile: {chunk_data['configs'][0]['profile']}")
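For reference, a hedged sketch of the minimal chunk-file shape this test reads. Only the chunk_id and configs fields (plus the per-config keys seen in test_adapter.py) are confirmed by the tests; the values below are illustrative.

import json
import os

# Hedged sketch: only 'chunk_id', 'configs', and 'profile' are confirmed keys.
chunk = {
    "chunk_id": "v9_advanced_chunk_0002",
    "configs": [
        {"profile": "minutes", "atr_minutes": 12, "mult_minutes": 3.8},
    ],
}
os.makedirs("chunks", exist_ok=True)
with open("chunks/v9_advanced_chunk_0002.json", "w") as f:
    json.dump(chunk, f, indent=2)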
@@ -88,27 +88,14 @@ def launch_worker(chunk_id: str, worker_name: str):
     print(f"Command: {full_cmd}")
 
     try:
-        result = subprocess.run(
-            full_cmd,
-            shell=True,
-            capture_output=True,
-            text=True,
-            timeout=3600  # 1 hour timeout per chunk
-        )
-
-        if result.returncode == 0:
-            print(f"✓ {chunk_id} completed on {worker_name}")
-            mark_complete(chunk_id)
-        else:
-            print(f"✗ {chunk_id} failed on {worker_name}")
-            print(f"Error: {result.stderr}")
-            mark_failed(chunk_id)
+        # Launch in background with nohup so it doesn't block coordinator
+        # Worker will create completion marker file when done
+        bg_cmd = f"nohup {full_cmd} > /tmp/{chunk_id}.log 2>&1 &"
+        subprocess.Popen(bg_cmd, shell=True)
+        print(f"✓ {chunk_id} launched on {worker_name} (background)")
 
-    except subprocess.TimeoutExpired:
-        print(f"⚠️ {chunk_id} timed out on {worker_name}")
-        mark_failed(chunk_id)
     except Exception as e:
-        print(f"❌ Error running {chunk_id} on {worker_name}: {e}")
+        print(f"❌ Error launching {chunk_id} on {worker_name}: {e}")
         mark_failed(chunk_id)
 
 def mark_complete(chunk_id: str):
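The behavioral change above is the point of this hunk: subprocess.run blocks the coordinator for up to an hour per chunk, while subprocess.Popen on a nohup'd background command returns immediately. A minimal sketch of the difference; the demo command is illustrative.

import subprocess

cmd = "sleep 60 && echo done"

# Old approach (left commented out): blocks until the command finishes or times out.
# subprocess.run(cmd, shell=True, capture_output=True, text=True, timeout=3600)

# New approach: the shell backgrounds the command, so Popen returns at once
# and the coordinator can launch the next chunk immediately.
subprocess.Popen(f"nohup {cmd} > /tmp/demo.log 2>&1 &", shell=True)
print("coordinator continues immediately")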
@@ -141,6 +128,47 @@ def mark_failed(chunk_id: str):
     conn.commit()
     conn.close()
 
+def check_completions():
+    """Check for completed chunks by looking for output files"""
+    conn = sqlite3.connect(DB_PATH)
+    cursor = conn.cursor()
+
+    # Get all running chunks
+    cursor.execute("""
+        SELECT id, assigned_worker FROM v9_advanced_chunks
+        WHERE status = 'running'
+    """)
+
+    running_chunks = cursor.fetchall()
+
+    for chunk_id, worker_name in running_chunks:
+        output_file = Path(f"distributed_results/{chunk_id}_results.csv")
+
+        # Check if output file exists locally (would be copied back)
+        if output_file.exists():
+            print(f"✓ {chunk_id} completed (found output file)")
+            mark_complete(chunk_id)
+        else:
+            # Check on remote worker
+            worker = WORKERS[worker_name]
+            remote_output = f"{worker['workspace']}/distributed_results/{chunk_id}_results.csv"
+
+            if 'ssh_hop' in worker:
+                check_cmd = f"ssh {worker['ssh_hop']} \"ssh {worker['host']} 'test -f {remote_output} && echo EXISTS'\""
+            else:
+                check_cmd = f"ssh {worker['host']} 'test -f {remote_output} && echo EXISTS'"
+
+            try:
+                result = subprocess.run(check_cmd, shell=True, capture_output=True, text=True, timeout=10)
+                if result.stdout.strip() == 'EXISTS':
+                    print(f"✓ {chunk_id} completed on {worker_name}")
+                    mark_complete(chunk_id)
+            except Exception as e:
+                # If check fails, chunk might still be running
+                pass
+
+    conn.close()
+
 def main():
     """Main coordinator loop"""
     print("="*60)
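check_completions() dereferences a WORKERS mapping with 'host', 'workspace', and an optional 'ssh_hop' for workers behind a jump host. A hedged sketch of that shape follows; the worker names, hostnames, and paths are placeholders.

# Hedged sketch of the WORKERS structure read above; all values are invented.
WORKERS = {
    "worker-direct": {
        "host": "user@192.0.2.10",
        "workspace": "/home/user/cluster",
    },
    "worker-behind-jump": {
        "host": "user@10.0.0.5",
        "ssh_hop": "user@jump.example.com",  # existence check is routed through this hop
        "workspace": "/home/user/cluster",
    },
}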
@@ -175,6 +203,9 @@ def main():
 
         print(f"Status: {completed} completed, {running} running, {pending} pending")
 
+        # Check for completed chunks
+        check_completions()
+
         if pending == 0 and running == 0:
             print("\n✓ All chunks completed!")
             break
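Taken together with the hunk above, main() is a poll-and-check loop. Below is a hedged, self-contained sketch of that shape; the status helper, its counts, and the sleep interval are assumptions, and check_completions() stands in for the function added in this commit.

import time

def get_chunk_counts():
    """Stub for the real SQLite status query (assumed helper)."""
    return 3, 0, 0  # completed, running, pending

def check_completions():
    """Stub standing in for the function added in this commit."""

while True:
    completed, running, pending = get_chunk_counts()
    print(f"Status: {completed} completed, {running} running, {pending} pending")

    # Check for completed chunks
    check_completions()

    if pending == 0 and running == 0:
        print("\n✓ All chunks completed!")
        break

    time.sleep(30)  # assumed poll interval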