Fix chat interface - restore continuous conversation flow

🎯 Major improvements to MissionControl component:
- Always keep input field visible and functional after AI responses
- Auto-clear input after submitting questions for better UX
- Add dynamic visual indicators (first question vs follow-up)
- Improve response layout with clear separation and hints
- Enable proper chat-like experience for continuous learning

🌟 Additional enhancements:
- Better language-specific messaging throughout interface
- Clearer visual hierarchy between input and response areas
- Intuitive flow that guides users to ask follow-up questions
- Maintains responsive design and accessibility

🔧 Technical changes:
- Enhanced MissionControl state management
- Improved component layout and styling
- Better TypeScript integration across components
- Updated tsconfig for stricter type checking
This commit is contained in:
rwiegand
2025-07-14 12:39:05 +02:00
parent b31492a354
commit f893530471
1798 changed files with 25329 additions and 92638 deletions

View File

@@ -1,9 +1,9 @@
import { APIResource } from "../core/resource.js";
import { APIResource } from "../resource.js";
import { APIPromise } from "../core.js";
import * as Core from "../core.js";
import * as CompletionsAPI from "./completions.js";
import * as CompletionsCompletionsAPI from "./chat/completions/completions.js";
import { APIPromise } from "../core/api-promise.js";
import { Stream } from "../core/streaming.js";
import { RequestOptions } from "../internal/request-options.js";
import { Stream } from "../streaming.js";
export declare class Completions extends APIResource {
/**
* Creates a completion for the provided prompt and parameters.
@@ -16,9 +16,9 @@ export declare class Completions extends APIResource {
* });
* ```
*/
create(body: CompletionCreateParamsNonStreaming, options?: RequestOptions): APIPromise<Completion>;
create(body: CompletionCreateParamsStreaming, options?: RequestOptions): APIPromise<Stream<Completion>>;
create(body: CompletionCreateParamsBase, options?: RequestOptions): APIPromise<Stream<Completion> | Completion>;
create(body: CompletionCreateParamsNonStreaming, options?: Core.RequestOptions): APIPromise<Completion>;
create(body: CompletionCreateParamsStreaming, options?: Core.RequestOptions): APIPromise<Stream<Completion>>;
create(body: CompletionCreateParamsBase, options?: Core.RequestOptions): APIPromise<Stream<Completion> | Completion>;
}
/**
* Represents a completion response from the API. Note: both the streamed and
@@ -74,9 +74,7 @@ export declare namespace CompletionChoice {
text_offset?: Array<number>;
token_logprobs?: Array<number>;
tokens?: Array<string>;
top_logprobs?: Array<{
[key: string]: number;
}>;
top_logprobs?: Array<Record<string, number>>;
}
}
/**
@@ -201,9 +199,7 @@ export interface CompletionCreateParamsBase {
* As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
* from being generated.
*/
logit_bias?: {
[key: string]: number;
} | null;
logit_bias?: Record<string, number> | null;
/**
* Include the log probabilities on the `logprobs` most likely output tokens, as
* well as the chosen tokens. For example, if `logprobs` is 5, the API will return a