LEARNING INTEGRATION:
- Enhanced AI analysis service feeds historical data into OpenAI prompts
- Symbol/timeframe-specific learning optimization
- Pattern recognition from past trade outcomes
- Confidence adjustment based on success rates

HTTP COMPATIBILITY SYSTEM:
- HttpUtil with automatic curl/no-curl detection
- Node.js fallback for Docker environments without curl
- Updated all automation systems to use HttpUtil
- Production-ready error handling

AUTONOMOUS RISK MANAGEMENT:
- Enhanced risk manager with learning integration
- Simplified learners using existing AILearningData schema
- Real-time position monitoring every 30 seconds
- Smart stop-loss decisions with AI learning

INFRASTRUCTURE:
- Database utility for shared Prisma connections
- Beach mode status display system
- Complete error handling and recovery
- Docker container compatibility tested

Historical performance flows into OpenAI prompts before every trade (sketched below).
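The file below implements the R/R learning piece of that flow. A minimal sketch of how its output might be folded into an OpenAI prompt follows; the helper names, require paths, prompt wording, and model choice are illustrative assumptions (assuming a v4-style openai Node client), not the production analysis service.

// Hypothetical sketch: fold historical R/R performance into an OpenAI prompt.
// SimplifiedRiskRewardLearner is the class in the file below; everything else
// here (paths, prompt text, model) is assumed for illustration only.
const OpenAI = require('openai');
const SimplifiedRiskRewardLearner = require('./simplified-rr-learner'); // assumed filename

async function buildTradePrompt(symbol, timeframe) {
  const learner = new SimplifiedRiskRewardLearner();
  const stats = await learner.analyzeOptimalRatios(symbol, timeframe);

  // Historical performance becomes plain-text context for the model.
  return [
    `You are analyzing a ${symbol} ${timeframe} setup.`,
    `Historical sample: ${stats.sampleSize} setups, win rate ${Math.round((stats.winRate || 0) * 100)}%.`,
    `Learned optimal risk/reward ratio: ${stats.optimalRatio}:1 (confidence ${Math.round(stats.confidence * 100)}%).`,
    `Propose entry, stop-loss and take-profit levels consistent with this history.`
  ].join('\n');
}

async function requestAnalysis(symbol, timeframe) {
  const client = new OpenAI(); // reads OPENAI_API_KEY from the environment
  const prompt = await buildTradePrompt(symbol, timeframe);
  const completion = await client.chat.completions.create({
    model: 'gpt-4o-mini', // illustrative model choice
    messages: [{ role: 'user', content: prompt }]
  });
  return completion.choices[0].message.content;
}

module.exports = { buildTradePrompt, requestAnalysis };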
277 lines
8.2 KiB
JavaScript
#!/usr/bin/env node

/**
 * Simplified Risk Reward Learning System
 *
 * Uses existing AILearningData schema for R/R learning integration
 */

const { getDB } = require('./db');

class SimplifiedRiskRewardLearner {
  constructor() {
    this.setupHistory = [];
    this.patterns = {
      optimal_rr_ratios: {},
      market_condition_adjustments: {},
      symbol_specific_patterns: {}
    };
  }

  async log(message) {
    const timestamp = new Date().toISOString();
    console.log(`[${timestamp}] 🧠 RR Learner: ${message}`);
  }

  /**
   * Record a risk/reward setup for learning
   */
  async recordSetup(setupData) {
    try {
      const setup = {
        userId: 'system',
        analysisData: {
          type: 'RISK_REWARD_SETUP',
          entryPrice: setupData.entryPrice,
          stopLoss: setupData.stopLoss,
          takeProfit: setupData.takeProfit,
          riskRewardRatio: setupData.riskRewardRatio,
          confidence: setupData.confidence,
          marketConditions: setupData.marketConditions || {},
          reasoning: setupData.reasoning,
          timestamp: new Date().toISOString()
        },
        marketConditions: setupData.marketConditions || {},
        timeframe: setupData.timeframe || '1h',
        symbol: setupData.symbol || 'SOLUSD'
      };

      const prisma = await getDB();
      const record = await prisma.aILearningData.create({
        data: setup
      });

      await this.log(`📝 Recorded R/R setup ${record.id}: ${setupData.riskRewardRatio}:1 ratio`);
      this.setupHistory.push(setup);
      return record.id;

    } catch (error) {
      await this.log(`❌ Error recording R/R setup: ${error.message}`);
      return null;
    }
  }

  /**
   * Update setup outcome for learning
   */
  async updateSetupOutcome(setupId, outcomeData) {
    try {
      const prisma = await getDB();
      await prisma.aILearningData.update({
        where: { id: setupId },
        data: {
          outcome: outcomeData.outcome,
          actualPrice: outcomeData.finalPrice,
          feedbackData: {
            outcome: outcomeData.outcome,
            actualRR: outcomeData.actualRR,
            pnlPercent: outcomeData.pnlPercent,
            duration: outcomeData.duration,
            hitTarget: outcomeData.hitTarget,
            hitStopLoss: outcomeData.hitStopLoss,
            marketConditions: outcomeData.marketConditions
          },
          updatedAt: new Date()
        }
      });

      await this.log(`✅ Updated R/R setup ${setupId} with outcome: ${outcomeData.outcome}`);
    } catch (error) {
      await this.log(`❌ Error updating setup outcome: ${error.message}`);
    }
  }

  /**
   * Analyze historical setups for optimal R/R ratios
   */
  async analyzeOptimalRatios(symbol = 'SOLUSD', timeframe = '1h') {
    try {
      const prisma = await getDB();
      const setups = await prisma.aILearningData.findMany({
        where: {
          symbol,
          timeframe,
          analysisData: {
            string_contains: '"type":"RISK_REWARD_SETUP"'
          },
          outcome: {
            not: null
          }
        },
        orderBy: { createdAt: 'desc' },
        take: 100
      });

      if (setups.length === 0) {
        await this.log(`📊 No R/R setups found for ${symbol} ${timeframe}`);
        return {
          optimalRatio: 3.0,
          confidence: 0.5,
          sampleSize: 0
        };
      }

      // Analyze successful setups
      const successfulSetups = setups.filter(s =>
        s.outcome === 'PROFIT' || s.feedbackData?.hitTarget
      );

      const failedSetups = setups.filter(s =>
        s.outcome === 'LOSS' || s.feedbackData?.hitStopLoss
      );

      if (successfulSetups.length === 0) {
        await this.log(`📊 No successful setups found for analysis`);
        return {
          optimalRatio: 3.0,
          confidence: 0.3,
          sampleSize: setups.length
        };
      }

      // Calculate average successful R/R ratio
      const successfulRatios = successfulSetups
        .map(s => s.analysisData?.riskRewardRatio || 3.0)
        .filter(ratio => ratio > 0 && ratio < 20); // Filter outliers

      const avgSuccessfulRatio = successfulRatios.reduce((a, b) => a + b, 0) / successfulRatios.length;

      // Calculate win rate
      const winRate = successfulSetups.length / setups.length;

      // Adjust optimal ratio based on win rate
      let optimalRatio = avgSuccessfulRatio;
      if (winRate < 0.4) {
        optimalRatio = Math.max(avgSuccessfulRatio, 3.5); // Need higher R/R if win rate is low
      } else if (winRate > 0.7) {
        optimalRatio = Math.max(avgSuccessfulRatio * 0.8, 2.0); // Can use lower R/R if win rate is high
      }

      const confidence = Math.min(0.9, 0.3 + (setups.length * 0.01) + (winRate * 0.3));
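      // Worked examples with assumed numbers, for illustration only:
      // - Ratio adjustment: an avgSuccessfulRatio of 2.8 at a 35% win rate is
      //   raised to 3.5:1, while 3.0 at a 75% win rate relaxes to max(2.4, 2.0) = 2.4:1.
      // - Confidence: 40 scored setups at a 55% win rate give
      //   0.3 + (40 * 0.01) + (0.55 * 0.3) = 0.865; 100 setups at a 70% win rate
      //   would exceed the cap and clamp to 0.9.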

      await this.log(`📊 Analyzed ${setups.length} setups. Win rate: ${Math.round(winRate * 100)}%, Optimal R/R: ${optimalRatio.toFixed(1)}:1`);

      return {
        optimalRatio: Number(optimalRatio.toFixed(1)),
        confidence,
        sampleSize: setups.length,
        winRate,
        avgSuccessfulRatio: Number(avgSuccessfulRatio.toFixed(1))
      };

    } catch (error) {
      await this.log(`❌ Error analyzing optimal ratios: ${error.message}`);
      return {
        optimalRatio: 3.0,
        confidence: 0.5,
        sampleSize: 0
      };
    }
  }

  /**
   * Generate smart R/R recommendation
   */
  async generateRRRecommendation(marketData) {
    try {
      const { symbol, timeframe, currentPrice, marketConditions } = marketData;

      const analysis = await this.analyzeOptimalRatios(symbol, timeframe);

      // Adjust based on market conditions
      let adjustedRatio = analysis.optimalRatio;

      if (marketConditions?.volatility === 'HIGH') {
        adjustedRatio *= 1.2; // Increase R/R in high volatility
      } else if (marketConditions?.volatility === 'LOW') {
        adjustedRatio *= 0.9; // Decrease R/R in low volatility
      }

      if (marketConditions?.trend === 'STRONG_BULLISH' || marketConditions?.trend === 'STRONG_BEARISH') {
        adjustedRatio *= 0.8; // Lower R/R in strong trends (higher probability)
      }

      // Suggest levels based on the ratio
      const defaultStopLossPercent = 2.0; // 2% default stop loss
      const takeProfitPercent = defaultStopLossPercent * adjustedRatio;
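      // Illustrative numbers (assumed, not from live data): a learned 3.3:1 ratio
      // in HIGH volatility within a strong trend becomes 3.3 * 1.2 * 0.8 ≈ 3.2:1,
      // so the default 2% stop implies a take-profit distance of about 6.3%.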

      await this.log(`🎯 R/R Recommendation: ${adjustedRatio.toFixed(1)}:1 ratio (${Math.round(analysis.confidence * 100)}% confidence)`);

      return {
        riskRewardRatio: Number(adjustedRatio.toFixed(1)),
        stopLossPercent: defaultStopLossPercent,
        takeProfitPercent: Number(takeProfitPercent.toFixed(1)),
        confidence: analysis.confidence,
        reasoning: `Based on ${analysis.sampleSize} historical setups with ${Math.round((analysis.winRate || 0.5) * 100)}% win rate`,
        marketAdjustment: adjustedRatio !== analysis.optimalRatio
      };

    } catch (error) {
      await this.log(`❌ Error generating R/R recommendation: ${error.message}`);
      return {
        riskRewardRatio: 3.0,
        stopLossPercent: 2.0,
        takeProfitPercent: 6.0,
        confidence: 0.5,
        reasoning: `Default R/R setup - learning system error: ${error.message}`,
        marketAdjustment: false
      };
    }
  }

  /**
   * Get learning status
   */
  async getLearningStatus() {
    try {
      const prisma = await getDB();
      const totalSetups = await prisma.aILearningData.count({
        where: {
          analysisData: {
            string_contains: '"type":"RISK_REWARD_SETUP"'
          }
        }
      });

      const recentSetups = await prisma.aILearningData.count({
        where: {
          analysisData: {
            string_contains: '"type":"RISK_REWARD_SETUP"'
          },
          createdAt: {
            gte: new Date(Date.now() - 24 * 60 * 60 * 1000)
          }
        }
      });

      return {
        totalSetups,
        recentSetups,
        patterns: this.patterns,
        isActive: totalSetups > 0
      };

    } catch (error) {
      await this.log(`❌ Error getting R/R learning status: ${error.message}`);
      return {
        totalSetups: 0,
        recentSetups: 0,
        patterns: this.patterns,
        isActive: false
      };
    }
  }
}

module.exports = SimplifiedRiskRewardLearner;
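A minimal usage sketch of the record → outcome → recommendation loop. The require path and all numeric values are illustrative assumptions; only the method names and field shapes come from the class above.

// Hypothetical usage of SimplifiedRiskRewardLearner; values are made up for illustration.
const SimplifiedRiskRewardLearner = require('./simplified-rr-learner'); // assumed filename

async function demo() {
  const learner = new SimplifiedRiskRewardLearner();

  // 1. Ask for a learned R/R recommendation before entering a trade.
  const rec = await learner.generateRRRecommendation({
    symbol: 'SOLUSD',
    timeframe: '1h',
    currentPrice: 150.25,
    marketConditions: { volatility: 'HIGH', trend: 'STRONG_BULLISH' }
  });

  // 2. Record the setup that was actually taken.
  const setupId = await learner.recordSetup({
    symbol: 'SOLUSD',
    timeframe: '1h',
    entryPrice: 150.25,
    stopLoss: 147.25, // ~2% below entry (risk of 3.00 per unit)
    takeProfit: 150.25 + 3.0 * rec.riskRewardRatio, // risk * learned R/R above entry
    riskRewardRatio: rec.riskRewardRatio,
    confidence: rec.confidence,
    reasoning: rec.reasoning,
    marketConditions: { volatility: 'HIGH', trend: 'STRONG_BULLISH' }
  });

  // 3. Later, feed the outcome back so the next analysis learns from it.
  if (setupId) {
    await learner.updateSetupOutcome(setupId, {
      outcome: 'PROFIT',
      finalPrice: 159.3,
      actualRR: 3.0,
      pnlPercent: 6.0,
      duration: '5h',
      hitTarget: true,
      hitStopLoss: false,
      marketConditions: { volatility: 'HIGH' }
    });
  }
}

demo().catch(console.error);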