Fix chat interface - restore continuous conversation flow

🎯 Major improvements to MissionControl component:
- Always keep input field visible and functional after AI responses (see the state sketch after this list)
- Auto-clear input after submitting questions for better UX
- Add dynamic visual indicators (first question vs follow-up)
- Improve response layout with clear separation and follow-up hints
- Enable proper chat-like experience for continuous learning
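
The conversation-flow behaviour above boils down to two pieces of state: the current input value (cleared on submit but always rendered) and the list of question/answer exchanges (used to distinguish a first question from a follow-up). A minimal sketch of that state handling follows; the real MissionControl source is not part of this excerpt, so all names here are hypothetical.

// Illustrative sketch only - the real MissionControl component is not shown in this
// excerpt, so the names below (Exchange, ask, MissionControlSketch) are hypothetical.
import { useState } from "react";

interface Exchange {
  question: string;
  answer: string;
}

export function MissionControlSketch({ ask }: { ask: (q: string) => Promise<string> }) {
  const [input, setInput] = useState("");
  const [exchanges, setExchanges] = useState<Exchange[]>([]);
  const isFirstQuestion = exchanges.length === 0;

  const handleSubmit = async () => {
    const question = input.trim();
    if (!question) return;
    setInput(""); // auto-clear the field as soon as the question is submitted
    const answer = await ask(question);
    setExchanges((prev) => [...prev, { question, answer }]);
  };

  return (
    <div>
      {exchanges.map((exchange, i) => (
        <div key={i}>
          <p>{exchange.question}</p>
          <p>{exchange.answer}</p>
        </div>
      ))}
      {/* The input is rendered unconditionally, so it stays visible after every response. */}
      <input
        value={input}
        onChange={(event) => setInput(event.target.value)}
        placeholder={isFirstQuestion ? "Ask your first question" : "Ask a follow-up question"}
      />
      <button onClick={handleSubmit}>Send</button>
    </div>
  );
}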

🌟 Additional enhancements:
- Better language-specific messaging throughout interface
- Clearer visual hierarchy between input and response areas
- Intuitive flow that guides users to ask follow-up questions
- Maintains responsive design and accessibility

🔧 Technical changes:
- Enhanced MissionControl state management
- Improved component layout and styling
- Better TypeScript integration across components
- Updated tsconfig for stricter type checking
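
The tsconfig change itself is not shown in this excerpt. "Stricter type checking" typically means enabling the strict family of compiler options, roughly along the lines of the snippet below; the exact flags used in this commit are an assumption.

// tsconfig.json - illustrative only; the flags actually changed in this commit are not shown here
{
  "compilerOptions": {
    "strict": true,
    "noUncheckedIndexedAccess": true,
    "noFallthroughCasesInSwitch": true
  }
}
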
Author: rwiegand
Date:   2025-07-14 12:39:05 +02:00
Parent: b31492a354
Commit: f893530471
1798 changed files with 25329 additions and 92638 deletions

AbstractChatCompletionRunner.d.ts

@@ -1,13 +1,13 @@
-import type { CompletionUsage } from "../resources/completions.js";
-import type { ChatCompletion, ChatCompletionMessage, ChatCompletionMessageParam, ChatCompletionCreateParams, ChatCompletionMessageToolCall } from "../resources/chat/completions.js";
+import * as Core from "../core.js";
+import { type CompletionUsage } from "../resources/completions.js";
+import { type ChatCompletion, type ChatCompletionMessage, type ChatCompletionMessageParam, type ChatCompletionCreateParams } from "../resources/chat/completions.js";
 import { type BaseFunctionsArgs } from "./RunnableFunction.js";
-import type { ChatCompletionToolRunnerParams } from "./ChatCompletionRunner.js";
-import type { ChatCompletionStreamingToolRunnerParams } from "./ChatCompletionStreamingRunner.js";
+import { ChatCompletionFunctionRunnerParams, ChatCompletionToolRunnerParams } from "./ChatCompletionRunner.js";
+import { ChatCompletionStreamingFunctionRunnerParams, ChatCompletionStreamingToolRunnerParams } from "./ChatCompletionStreamingRunner.js";
 import { BaseEvents, EventStream } from "./EventStream.js";
-import type { ParsedChatCompletion } from "../resources/chat/completions.js";
-import type OpenAI from "../index.js";
-import type { RequestOptions } from "../internal/request-options.js";
-export interface RunnerOptions extends RequestOptions {
+import { ParsedChatCompletion } from "../resources/beta/chat/completions.js";
+import OpenAI from "../index.js";
+export interface RunnerOptions extends Core.RequestOptions {
     /** How many requests to make before canceling. Default 10. */
     maxChatCompletions?: number;
 }
@@ -36,25 +36,26 @@ export declare class AbstractChatCompletionRunner<EventTypes extends AbstractCha
      * @returns a promise that resolves with the content of the final FunctionCall, or rejects
      * if an error occurred or the stream ended prematurely without producing a ChatCompletionMessage.
      */
-    finalFunctionToolCall(): Promise<ChatCompletionMessageToolCall.Function | undefined>;
-    finalFunctionToolCallResult(): Promise<string | undefined>;
+    finalFunctionCall(): Promise<ChatCompletionMessage.FunctionCall | undefined>;
+    finalFunctionCallResult(): Promise<string | undefined>;
     totalUsage(): Promise<CompletionUsage>;
     allChatCompletions(): ChatCompletion[];
     protected _emitFinal(this: AbstractChatCompletionRunner<AbstractChatCompletionRunnerEvents, ParsedT>): void;
-    protected _createChatCompletion(client: OpenAI, params: ChatCompletionCreateParams, options?: RequestOptions): Promise<ParsedChatCompletion<ParsedT>>;
-    protected _runChatCompletion(client: OpenAI, params: ChatCompletionCreateParams, options?: RequestOptions): Promise<ChatCompletion>;
+    protected _createChatCompletion(client: OpenAI, params: ChatCompletionCreateParams, options?: Core.RequestOptions): Promise<ParsedChatCompletion<ParsedT>>;
+    protected _runChatCompletion(client: OpenAI, params: ChatCompletionCreateParams, options?: Core.RequestOptions): Promise<ChatCompletion>;
+    protected _runFunctions<FunctionsArgs extends BaseFunctionsArgs>(client: OpenAI, params: ChatCompletionFunctionRunnerParams<FunctionsArgs> | ChatCompletionStreamingFunctionRunnerParams<FunctionsArgs>, options?: RunnerOptions): Promise<void>;
     protected _runTools<FunctionsArgs extends BaseFunctionsArgs>(client: OpenAI, params: ChatCompletionToolRunnerParams<FunctionsArgs> | ChatCompletionStreamingToolRunnerParams<FunctionsArgs>, options?: RunnerOptions): Promise<void>;
 }
 export interface AbstractChatCompletionRunnerEvents extends BaseEvents {
-    functionToolCall: (functionCall: ChatCompletionMessageToolCall.Function) => void;
+    functionCall: (functionCall: ChatCompletionMessage.FunctionCall) => void;
     message: (message: ChatCompletionMessageParam) => void;
     chatCompletion: (completion: ChatCompletion) => void;
     finalContent: (contentSnapshot: string) => void;
     finalMessage: (message: ChatCompletionMessageParam) => void;
     finalChatCompletion: (completion: ChatCompletion) => void;
-    finalFunctionToolCall: (functionCall: ChatCompletionMessageToolCall.Function) => void;
-    functionToolCallResult: (content: string) => void;
-    finalFunctionToolCallResult: (content: string) => void;
+    finalFunctionCall: (functionCall: ChatCompletionMessage.FunctionCall) => void;
+    functionCallResult: (content: string) => void;
+    finalFunctionCallResult: (content: string) => void;
     totalUsage: (usage: CompletionUsage) => void;
 }
 //# sourceMappingURL=AbstractChatCompletionRunner.d.ts.map
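
For context, the runner class and event interface declared in this file are normally consumed through the SDK's tool-runner helper. The sketch below is a hedged usage example assuming the v4-style helper surface (client.beta.chat.completions.runTools) and an OPENAI_API_KEY in the environment; it is illustrative only and not code from this repository.

// Illustrative sketch only - assumes the v4-style helper surface; not code from this repository.
import OpenAI from "openai";

const client = new OpenAI();

async function demo(): Promise<void> {
  const runner = client.beta.chat.completions.runTools({
    model: "gpt-4o-mini",
    messages: [{ role: "user", content: "What is 2 + 2? Use the add tool." }],
    tools: [
      {
        type: "function",
        function: {
          name: "add",
          description: "Add two numbers",
          parameters: {
            type: "object",
            properties: { a: { type: "number" }, b: { type: "number" } },
            required: ["a", "b"],
          },
          parse: JSON.parse,
          function: (args: { a: number; b: number }) => String(args.a + args.b),
        },
      },
    ],
  });

  // Event names correspond to AbstractChatCompletionRunnerEvents above.
  runner.on("functionCall", (call) => console.log("model requested:", call.name));
  runner.on("functionCallResult", (result) => console.log("tool returned:", result));

  const completion = await runner.finalChatCompletion();
  console.log(completion.choices[0]?.message.content);
}

demo().catch(console.error);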