**Documentation Structure:** - Created docs/ subdirectory organization (analysis/, architecture/, bugs/, cluster/, deployments/, roadmaps/, setup/, archived/) - Moved 68 root markdown files to appropriate categories - Root directory now clean (only README.md remains) - Total: 83 markdown files now organized by purpose **New Content:** - Added comprehensive Environment Variable Reference to copilot-instructions.md - 100+ ENV variables documented with types, defaults, purpose, notes - Organized by category: Required (Drift/RPC/Pyth), Trading Config (quality/ leverage/sizing), ATR System, Runner System, Risk Limits, Notifications, etc. - Includes usage examples (correct vs wrong patterns) **File Distribution:** - docs/analysis/ - Performance analyses, blocked signals, profit projections - docs/architecture/ - Adaptive leverage, ATR trailing, indicator tracking - docs/bugs/ - CRITICAL_*.md, FIXES_*.md bug reports (7 files) - docs/cluster/ - EPYC setup, distributed computing docs (3 files) - docs/deployments/ - *_COMPLETE.md, DEPLOYMENT_*.md status (12 files) - docs/roadmaps/ - All *ROADMAP*.md strategic planning files (7 files) - docs/setup/ - TradingView guides, signal quality, n8n setup (8 files) - docs/archived/2025_pre_nov/ - Obsolete verification checklist (1 file) **Key Improvements:** - ENV variable reference: Single source of truth for all configuration - Common Pitfalls #68-71: Already complete, verified during audit - Better findability: Category-based navigation vs 68 files in root - Preserves history: All files git mv (rename), not copy/delete - Zero broken functionality: Only documentation moved, no code changes **Verification:** - 83 markdown files now in docs/ subdirectories - Root directory cleaned: 68 files → 0 files (except README.md) - Git history preserved for all moved files - Container running: trading-bot-v4 (no restart needed) **Next Steps:** - Create README.md files in each docs subdirectory - Add navigation index - Update main README.md with 
new structure - Consolidate duplicate deployment docs - Archive truly obsolete files (old SQL backups). See docs/analysis/CLEANUP_PLAN.md for the complete reorganization strategy.
286 lines
9.9 KiB
TypeScript
// Next.js API route: cluster status endpoint.
// Aggregates worker health (probed over SSH), exploration progress (read from
// a SQLite database), and a top-strategy recommendation into one JSON payload.
import { NextRequest, NextResponse } from 'next/server'
import { exec } from 'child_process'
import { promisify } from 'util'
import sqlite3 from 'sqlite3'
import { open, Database } from 'sqlite'
import path from 'path'

// Promise-based wrapper around child_process.exec, for async/await usage below.
const execAsync = promisify(exec)

// Disable Next.js route caching: status must be recomputed on every request.
export const dynamic = 'force-dynamic'
|
|
|
|
// Health snapshot for one cluster worker node, gathered via SSH probing.
interface WorkerStatus {
  name: string                           // logical worker id (e.g. 'worker1')
  host: string                           // human-readable host label
  cpuUsage: number                       // CPU utilization percent; 0 when unreachable
  loadAverage: string                    // raw `uptime` load-average string; 'N/A' when offline
  activeProcesses: number                // count of running distributed_worker processes
  status: 'active' | 'idle' | 'offline'  // derived state; 'offline' when the SSH probe fails
}
|
|
|
|
// One ranked backtest result produced by the parameter-exploration cluster.
interface ChunkResult {
  rank: number           // 1-based leaderboard position
  pnl_per_1k: number     // profit/loss in dollars per $1,000 of capital
  win_rate: number       // fraction of winning trades (0-1)
  trades: number         // total trades in the backtest
  profit_factor: number  // gross profit divided by gross loss
  max_drawdown: number   // worst peak-to-trough equity drop (dollars)
  params: {              // strategy parameters that produced this result
    flip_threshold: number  // position-flip trigger (percent)
    ma_gap: number          // required moving-average gap
    adx_min: number         // minimum ADX (trend-strength) filter
    long_pos_max: number    // maximum position value to allow longs (percent)
    short_pos_min: number   // minimum position value to allow shorts (percent)
  }
}
|
|
|
|
async function getWorkerStatus(workerName: string, sshCommand: string): Promise<WorkerStatus> {
|
|
try {
|
|
// Get CPU usage
|
|
const cpuCmd = `${sshCommand} "top -bn1 | grep 'Cpu(s)' | awk '{print 100-\\$8}'"`
|
|
const { stdout: cpuOut } = await execAsync(cpuCmd)
|
|
const cpuUsage = parseFloat(cpuOut.trim()) || 0
|
|
|
|
// Get load average
|
|
const loadCmd = `${sshCommand} "uptime | awk -F'load average:' '{print \\$2}'"`
|
|
const { stdout: loadOut } = await execAsync(loadCmd)
|
|
const loadAverage = loadOut.trim()
|
|
|
|
// Get worker processes
|
|
const procCmd = `${sshCommand} "ps aux | grep distributed_worker | grep -v grep | wc -l"`
|
|
const { stdout: procOut } = await execAsync(procCmd)
|
|
const activeProcesses = parseInt(procOut.trim()) || 0
|
|
|
|
const status: 'active' | 'idle' | 'offline' =
|
|
activeProcesses > 0 ? 'active' :
|
|
cpuUsage > 10 ? 'active' : 'idle'
|
|
|
|
return {
|
|
name: workerName,
|
|
host: sshCommand.includes('10.20.254.100') ? 'bd-host01 (32 cores)' : 'pve-nu-monitor01 (32 cores)',
|
|
cpuUsage,
|
|
loadAverage,
|
|
activeProcesses,
|
|
status
|
|
}
|
|
} catch (error) {
|
|
return {
|
|
name: workerName,
|
|
host: sshCommand.includes('10.20.254.100') ? 'bd-host01' : 'pve-nu-monitor01',
|
|
cpuUsage: 0,
|
|
loadAverage: 'N/A',
|
|
activeProcesses: 0,
|
|
status: 'offline'
|
|
}
|
|
}
|
|
}
|
|
|
|
async function getExplorationData() {
|
|
try {
|
|
const dbPath = path.join(process.cwd(), 'cluster', 'exploration.db')
|
|
|
|
const db = await open({
|
|
filename: dbPath,
|
|
driver: sqlite3.Database
|
|
})
|
|
|
|
// UPDATED (Dec 3, 2025): Track comprehensive v9_advanced sweep (1,693 chunks, 1.693M combos)
|
|
// Get total combos and chunk statistics from v9_advanced tables
|
|
// NOTE: v9_advanced_chunks uses start_combo/end_combo (not chunk_start/chunk_end)
|
|
const totalCombosRow = await db.get('SELECT SUM(total_combos) as total FROM v9_advanced_chunks')
|
|
const totalCombos = totalCombosRow?.total || 0
|
|
|
|
const chunks = await db.all('SELECT * FROM v9_advanced_chunks ORDER BY start_combo')
|
|
const completedChunks = chunks.filter(c => c.status === 'completed').length
|
|
const runningChunks = chunks.filter(c => c.status === 'running').length
|
|
const pendingChunks = chunks.filter(c => c.status === 'pending').length
|
|
|
|
// Try to get strategies from v9_advanced_strategies table
|
|
let strategies: any[] = []
|
|
let testedCombos = 0
|
|
|
|
try {
|
|
const strategiesCount = await db.get('SELECT COUNT(*) as count FROM v9_advanced_strategies')
|
|
testedCombos = strategiesCount?.count || 0
|
|
|
|
strategies = await db.all(`
|
|
SELECT * FROM v9_advanced_strategies
|
|
WHERE total_trades >= 700
|
|
ORDER BY pnl_per_1k DESC
|
|
LIMIT 10
|
|
`)
|
|
} catch (e) {
|
|
// Strategies table doesn't exist yet - this is fine
|
|
console.log('Strategies table not yet available')
|
|
}
|
|
|
|
await db.close()
|
|
|
|
const progress = totalCombos > 0 ? Math.round((testedCombos / totalCombos) * 100) : 0
|
|
|
|
return {
|
|
totalCombos,
|
|
testedCombos,
|
|
progress,
|
|
chunks: {
|
|
total: chunks.length,
|
|
completed: completedChunks,
|
|
running: runningChunks,
|
|
pending: pendingChunks
|
|
},
|
|
strategies
|
|
}
|
|
} catch (error) {
|
|
console.error('Error reading exploration database:', error)
|
|
return {
|
|
totalCombos: 0,
|
|
testedCombos: 0,
|
|
progress: 0,
|
|
chunks: { total: 0, completed: 0, running: 0, pending: 0 },
|
|
strategies: []
|
|
}
|
|
}
|
|
}
|
|
|
|
interface ChunkResult {
|
|
rank: number
|
|
pnl_per_1k: number
|
|
win_rate: number
|
|
trades: number
|
|
profit_factor: number
|
|
max_drawdown: number
|
|
params: {
|
|
flip_threshold: number
|
|
ma_gap: number
|
|
adx_min: number
|
|
long_pos_max: number
|
|
short_pos_min: number
|
|
}
|
|
}
|
|
|
|
function generateRecommendation(results: ChunkResult[]): string {
|
|
if (results.length === 0) {
|
|
return "Cluster is processing parameter combinations. Check back soon for optimization recommendations."
|
|
}
|
|
|
|
const best = results[0]
|
|
const avgWinRate = results.reduce((sum, r) => sum + r.win_rate, 0) / results.length
|
|
const avgPnL = results.reduce((sum, r) => sum + r.pnl_per_1k, 0) / results.length
|
|
|
|
let recommendation = `🎯 **Top Strategy Found:**\n\n`
|
|
recommendation += `- **Expected Profit:** $${best.pnl_per_1k.toFixed(2)} per $1,000 capital\n`
|
|
recommendation += `- **Win Rate:** ${(best.win_rate * 100).toFixed(1)}%\n`
|
|
recommendation += `- **Profit Factor:** ${best.profit_factor.toFixed(2)}x\n`
|
|
recommendation += `- **Max Drawdown:** $${Math.abs(best.max_drawdown).toFixed(2)}\n\n`
|
|
|
|
recommendation += `📊 **Optimal Parameters:**\n`
|
|
recommendation += `- Flip Threshold: ${best.params.flip_threshold}%\n`
|
|
recommendation += `- MA Gap: ${best.params.ma_gap}\n`
|
|
recommendation += `- Min ADX: ${best.params.adx_min}\n`
|
|
recommendation += `- Long Max Position: ${best.params.long_pos_max}%\n`
|
|
recommendation += `- Short Min Position: ${best.params.short_pos_min}%\n\n`
|
|
|
|
if (best.pnl_per_1k > avgPnL * 1.5) {
|
|
recommendation += `✅ **Action:** This strategy shows exceptional performance (${((best.pnl_per_1k / avgPnL) * 100 - 100).toFixed(0)}% better than average). Consider implementing these parameters in production.`
|
|
} else if (best.win_rate > 0.6) {
|
|
recommendation += `✅ **Action:** Strong win rate detected. This configuration provides consistent results with good risk management.`
|
|
} else {
|
|
recommendation += `⚠️ **Action:** Continue exploration. Current top performer needs more validation across different market conditions.`
|
|
}
|
|
|
|
return recommendation
|
|
}
|
|
|
|
export async function GET(request: NextRequest) {
|
|
try {
|
|
// CRITICAL FIX (Nov 30, 2025): Check database FIRST before SSH detection
|
|
// Database is the source of truth - SSH may timeout but workers are still running
|
|
const explorationData = await getExplorationData()
|
|
const hasRunningChunks = explorationData.chunks.running > 0
|
|
|
|
// Get status from both workers (SSH for supplementary metrics only)
|
|
const [worker1Status, worker2Status] = await Promise.all([
|
|
getWorkerStatus('worker1', 'ssh root@10.10.254.106'),
|
|
getWorkerStatus('worker2', 'ssh root@10.10.254.106 "ssh root@10.20.254.100"')
|
|
])
|
|
|
|
// If database shows running chunks but SSH shows offline, override to active
|
|
// This prevents false "idle" status when SSH detection times out
|
|
const workers = [worker1Status, worker2Status].map(w => {
|
|
if (hasRunningChunks && w.status === 'offline') {
|
|
console.log(`✅ ${w.name}: Database shows running chunks - overriding SSH offline to active`)
|
|
return {
|
|
...w,
|
|
status: 'active' as const,
|
|
activeProcesses: w.activeProcesses || 1 // Assume at least 1 process if chunks running
|
|
}
|
|
}
|
|
return w
|
|
})
|
|
|
|
const totalCPU = workers.reduce((sum, w) => sum + w.cpuUsage, 0) / workers.length
|
|
const totalProcesses = workers.reduce((sum, w) => sum + w.activeProcesses, 0)
|
|
const activeWorkers = workers.filter(w => w.status === 'active').length
|
|
|
|
// Determine cluster status: DATABASE-FIRST APPROACH
|
|
// If running chunks exist, cluster is active regardless of SSH detection
|
|
let clusterStatus: 'active' | 'idle' = 'idle'
|
|
if (hasRunningChunks) {
|
|
clusterStatus = 'active'
|
|
console.log('✅ Cluster status: ACTIVE (database shows running chunks)')
|
|
} else if (activeWorkers > 0) {
|
|
clusterStatus = 'active'
|
|
console.log('✅ Cluster status: ACTIVE (SSH detected active workers)')
|
|
} else {
|
|
console.log('⏸️ Cluster status: IDLE (no running chunks or active workers)')
|
|
}
|
|
|
|
// Convert strategies to ChunkResult format for recommendation
|
|
const topStrategies: ChunkResult[] = explorationData.strategies.map((s: any, idx: number) => ({
|
|
rank: idx + 1,
|
|
pnl_per_1k: s.pnl_per_1k || 0,
|
|
win_rate: s.win_rate || 0,
|
|
trades: s.total_trades || 0,
|
|
profit_factor: s.profit_factor || 0,
|
|
max_drawdown: s.max_drawdown || 0,
|
|
params: {
|
|
flip_threshold: s.flip_threshold || 0,
|
|
ma_gap: s.ma_gap || 0,
|
|
adx_min: s.momentum_adx || 0,
|
|
long_pos_max: s.momentum_long_pos || 0,
|
|
short_pos_min: s.momentum_short_pos || 0
|
|
}
|
|
}))
|
|
|
|
const recommendation = generateRecommendation(topStrategies)
|
|
|
|
return NextResponse.json({
|
|
cluster: {
|
|
totalCores: 64,
|
|
activeCores: Math.round(totalCPU * 0.64), // 70% of 64 cores
|
|
cpuUsage: totalCPU,
|
|
activeWorkers,
|
|
totalWorkers: 2,
|
|
workerProcesses: totalProcesses,
|
|
status: clusterStatus // Use database-aware status
|
|
},
|
|
workers,
|
|
exploration: {
|
|
totalCombinations: explorationData.totalCombos,
|
|
testedCombinations: explorationData.testedCombos,
|
|
progress: explorationData.progress,
|
|
chunks: explorationData.chunks
|
|
},
|
|
topStrategies: topStrategies.slice(0, 5),
|
|
recommendation,
|
|
lastUpdate: new Date().toISOString()
|
|
}, { status: 200 })
|
|
} catch (error) {
|
|
console.error('Error fetching cluster status:', error)
|
|
return NextResponse.json({
|
|
error: 'Failed to fetch cluster status',
|
|
details: error instanceof Error ? error.message : 'Unknown error'
|
|
}, { status: 500 })
|
|
}
|
|
}
|