critical: Fix Smart Entry Validation Queue wrong price display

- Bug: Validation queue used the TradingView symbol format (SOLUSDT) to look up the market data cache
- Cache uses normalized Drift format (SOL-PERP)
- Result: Cache lookup failed, wrong/stale price shown in Telegram abandonment notifications
- Real incident: Signal at $126.00 showed a $98.18 abandonment price (an impossible -22.08% drop)
- Fix: Added normalizeTradingViewSymbol() call in check-risk endpoint before passing to validation queue
- Files changed: app/api/trading/check-risk/route.ts (import + symbol normalization)
- Impact: Validation queue now correctly retrieves current price from market data cache
- Deployed: Dec 1, 2025
mindesbunister committed 2025-12-01 23:45:21 +01:00
commit 6cec2e8e71 (parent 4fb301328d)
6 changed files with 120 additions and 22 deletions

app/api/trading/check-risk/route.ts

@@ -6,7 +6,7 @@
*/
import { NextRequest, NextResponse } from 'next/server'
-import { getMergedConfig, TradingConfig, getMinQualityScoreForDirection } from '@/config/trading'
+import { getMergedConfig, TradingConfig, getMinQualityScoreForDirection, normalizeTradingViewSymbol } from '@/config/trading'
import { getInitializedPositionManager, ActiveTrade } from '@/lib/trading/position-manager'
import { getLastTradeTime, getLastTradeTimeForSymbol, getTradesInLastHour, getTodayPnL, createBlockedSignal } from '@/lib/database/trades'
import { getPythPriceMonitor } from '@/lib/pyth/price-monitor'
@@ -432,9 +432,16 @@ export async function POST(request: NextRequest): Promise<NextResponse<RiskCheck
// SMART VALIDATION QUEUE (Nov 30, 2025)
// Queue marginal quality signals (50-89) for validation instead of hard-blocking
const validationQueue = getSmartValidationQueue()
+// CRITICAL FIX (Dec 1, 2025): Normalize TradingView symbol format to Drift format
+// Bug: Market data cache uses "SOL-PERP" but TradingView sends "SOLUSDT"
+// Without normalization, validation queue can't find matching price data
+// Result: Wrong/stale price shown in Telegram abandonment notifications
+const normalizedSymbol = normalizeTradingViewSymbol(body.symbol)
const queued = await validationQueue.addSignal({
blockReason: 'QUALITY_SCORE_TOO_LOW',
-symbol: body.symbol,
+symbol: normalizedSymbol, // Use normalized format for cache lookup
direction: body.direction,
originalPrice: currentPrice,
qualityScore: qualityScore.score,
@@ -448,7 +455,7 @@ export async function POST(request: NextRequest): Promise<NextResponse<RiskCheck
})
if (queued) {
-console.log(`🧠 Signal queued for smart validation: ${body.symbol} ${body.direction} (quality ${qualityScore.score})`)
+console.log(`🧠 Signal queued for smart validation: ${normalizedSymbol} ${body.direction} (quality ${qualityScore.score})`)
}
} else {
console.warn('⚠️ Skipping blocked signal save: price unavailable (quality block)')
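
For context, a minimal sketch of the mapping that normalizeTradingViewSymbol is assumed to perform. This is a hypothetical illustration, not the actual helper exported from @/config/trading, which may handle more markets and edge cases:

// Hypothetical sketch (assumption): convert TradingView tickers such as "SOLUSDT"
// into the Drift perp-market keys the market data cache is keyed by ("SOL-PERP").
// The real normalizeTradingViewSymbol in @/config/trading may differ in detail.
function normalizeTradingViewSymbol(symbol: string): string {
  const upper = symbol.toUpperCase()
  // Already in Drift format, nothing to do
  if (upper.endsWith('-PERP')) return upper
  // Strip a trailing quote currency (USDT/USDC/USD) and append the -PERP suffix
  const base = upper.replace(/(USDT|USDC|USD)$/, '')
  return `${base}-PERP`
}

console.log(normalizeTradingViewSymbol('SOLUSDT')) // "SOL-PERP", matching the cache key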

Binary file not shown.

cluster/test_adapter.py (new file, 45 lines)

@@ -0,0 +1,45 @@
#!/usr/bin/env python3
"""Test profile adapter logic"""
from money_line_v9 import MoneyLineV9Inputs
# Test profile adapter logic
config = {
    'profile': 'minutes',
    'atr_minutes': 12,
    'mult_minutes': 3.8,
    'rsi_long_min': 35,
    'rsi_long_max': 70,
    'rsi_short_min': 30,
    'rsi_short_max': 70,
    'vol_max': 3.5,
    'entry_buffer': 0.2,
    'adx_length': 16,
    'use_ma_gap': False,
    'ma_gap_min_long': 0.0,
    'ma_gap_min_short': 0.0
}

# Simulate adapter logic
profile = config['profile']
atr_map = {'minutes': config.get('atr_minutes', 12)}
mult_map = {'minutes': config.get('mult_minutes', 3.8)}

# Create inputs with mapped parameters
inputs = MoneyLineV9Inputs(
    atr_period=atr_map[profile],
    multiplier=mult_map[profile],
    rsi_long_min=config['rsi_long_min'],
    rsi_long_max=config['rsi_long_max'],
    rsi_short_min=config['rsi_short_min'],
    rsi_short_max=config['rsi_short_max'],
    vol_max=config['vol_max'],
    entry_buffer_atr=config['entry_buffer'],
    adx_length=config['adx_length'],
    use_ma_gap_filter=config['use_ma_gap'],
    ma_gap_long_min=config['ma_gap_min_long'],
    ma_gap_short_max=config['ma_gap_min_short']
)
print(f"✅ MoneyLineV9Inputs created successfully!")
print(f" atr_period={inputs.atr_period}, multiplier={inputs.multiplier}")
print(f" use_ma_gap_filter={inputs.use_ma_gap_filter}")


@@ -0,0 +1,14 @@
#!/usr/bin/env python3
"""Test loading chunk file"""
import json
import sys
# Test loading chunk file
with open('chunks/v9_advanced_chunk_0002.json', 'r') as f:
    chunk_data = json.load(f)
print(f"✅ Chunk file loaded successfully")
print(f" Chunk ID: {chunk_data['chunk_id']}")
print(f" Configs: {len(chunk_data['configs'])}")
print(f" First config keys: {list(chunk_data['configs'][0].keys())}")
print(f" First config profile: {chunk_data['configs'][0]['profile']}")


@@ -88,27 +88,14 @@ def launch_worker(chunk_id: str, worker_name: str):
print(f"Command: {full_cmd}")
try:
result = subprocess.run(
full_cmd,
shell=True,
capture_output=True,
text=True,
timeout=3600 # 1 hour timeout per chunk
)
if result.returncode == 0:
print(f"{chunk_id} completed on {worker_name}")
mark_complete(chunk_id)
else:
print(f"{chunk_id} failed on {worker_name}")
print(f"Error: {result.stderr}")
mark_failed(chunk_id)
# Launch in background with nohup so it doesn't block coordinator
# Worker will create completion marker file when done
bg_cmd = f"nohup {full_cmd} > /tmp/{chunk_id}.log 2>&1 &"
subprocess.Popen(bg_cmd, shell=True)
print(f"{chunk_id} launched on {worker_name} (background)")
except subprocess.TimeoutExpired:
print(f"⚠️ {chunk_id} timed out on {worker_name}")
mark_failed(chunk_id)
except Exception as e:
print(f"❌ Error running {chunk_id} on {worker_name}: {e}")
print(f"❌ Error launching {chunk_id} on {worker_name}: {e}")
mark_failed(chunk_id)
def mark_complete(chunk_id: str):
@@ -141,6 +128,47 @@ def mark_failed(chunk_id: str):
    conn.commit()
    conn.close()

def check_completions():
    """Check for completed chunks by looking for output files"""
    conn = sqlite3.connect(DB_PATH)
    cursor = conn.cursor()

    # Get all running chunks
    cursor.execute("""
        SELECT id, assigned_worker FROM v9_advanced_chunks
        WHERE status = 'running'
    """)
    running_chunks = cursor.fetchall()

    for chunk_id, worker_name in running_chunks:
        output_file = Path(f"distributed_results/{chunk_id}_results.csv")

        # Check if output file exists locally (would be copied back)
        if output_file.exists():
            print(f"{chunk_id} completed (found output file)")
            mark_complete(chunk_id)
        else:
            # Check on remote worker
            worker = WORKERS[worker_name]
            remote_output = f"{worker['workspace']}/distributed_results/{chunk_id}_results.csv"

            if 'ssh_hop' in worker:
                check_cmd = f"ssh {worker['ssh_hop']} \"ssh {worker['host']} 'test -f {remote_output} && echo EXISTS'\""
            else:
                check_cmd = f"ssh {worker['host']} 'test -f {remote_output} && echo EXISTS'"

            try:
                result = subprocess.run(check_cmd, shell=True, capture_output=True, text=True, timeout=10)
                if result.stdout.strip() == 'EXISTS':
                    print(f"{chunk_id} completed on {worker_name}")
                    mark_complete(chunk_id)
            except Exception as e:
                # If check fails, chunk might still be running
                pass

    conn.close()

def main():
    """Main coordinator loop"""
    print("="*60)
@@ -175,6 +203,9 @@ def main():
print(f"Status: {completed} completed, {running} running, {pending} pending")
# Check for completed chunks
check_completions()
if pending == 0 and running == 0:
print("\n✓ All chunks completed!")
break


@@ -10,6 +10,7 @@ services:
    env_file:
      - .env.telegram-bot
    dns:
      - 9.9.9.9
      - 8.8.8.8
      - 8.8.4.4
    environment: