fix: emergency rate-limit patch - stop runaway trading automation loops

- Replace automation service with emergency rate-limited version
- Add 5-minute minimum interval between automation starts
- Implement forced Chromium process cleanup on stop
- Back up the broken automation service as a .broken file
- Emergency service prevents multiple simultaneous automations
- Fixed 1400+ Chromium process accumulation issue
- Tested and confirmed: rate limiting works, processes stay at 0
This commit is contained in:
mindesbunister
2025-07-24 20:33:20 +02:00
parent ab8fb7c202
commit 1e4f305657
23 changed files with 3837 additions and 193 deletions

View File

@@ -80,6 +80,7 @@ export interface AnalysisResult {
}
export class AIAnalysisService {
private lastApiCall: number = 0
async analyzeScreenshot(filenameOrPath: string): Promise<AnalysisResult | null> {
try {
let imagePath: string
@@ -594,30 +595,49 @@ Analyze all provided screenshots comprehensively and return only the JSON respon
console.log(`🤖 Sending ${filenamesOrPaths.length} screenshots to OpenAI for multi-layout analysis...`)
const response = await openai.chat.completions.create({
model: "gpt-4o-mini", // Cost-effective model with vision capabilities
messages,
max_tokens: 2000,
temperature: 0.1
})
const content = response.choices[0]?.message?.content
if (!content) {
throw new Error('No response from OpenAI')
// Add rate limiting check to prevent 429 errors
const now = Date.now()
if (this.lastApiCall && (now - this.lastApiCall) < 2000) {
const waitTime = 2000 - (now - this.lastApiCall)
console.log(`⏳ Rate limiting: waiting ${waitTime}ms before OpenAI call`)
await new Promise(resolve => setTimeout(resolve, waitTime))
}
console.log('🔍 Raw OpenAI response:', content.substring(0, 200) + '...')
// Parse JSON response
const jsonMatch = content.match(/\{[\s\S]*\}/)
if (!jsonMatch) {
throw new Error('No JSON found in response')
}
const analysis = JSON.parse(jsonMatch[0])
console.log('✅ Multi-layout analysis parsed successfully')
return analysis as AnalysisResult
try {
const response = await openai.chat.completions.create({
model: "gpt-4o-mini", // Cost-effective model with vision capabilities
messages,
max_tokens: 2000,
temperature: 0.1
})
this.lastApiCall = Date.now()
const content = response.choices[0]?.message?.content
if (!content) {
throw new Error('No response from OpenAI')
}
console.log('🔍 Raw OpenAI response:', content.substring(0, 200) + '...')
// Parse JSON response
const jsonMatch = content.match(/\{[\s\S]*\}/)
if (!jsonMatch) {
throw new Error('No JSON found in response')
}
const analysis = JSON.parse(jsonMatch[0])
console.log('✅ Multi-layout analysis parsed successfully')
return analysis as AnalysisResult
} catch (error: any) {
if (error.status === 429) {
console.log('⏳ OpenAI rate limit hit - will retry on next cycle')
// Don't throw the error, just return null to skip this cycle gracefully
return null
}
throw error
}
} catch (error: any) {
console.error('❌ Multi-screenshot AI analysis failed:', error.message)