Fix chat interface: restore continuous conversation flow

🎯 Major improvements to MissionControl component:
- Always keep input field visible and functional after AI responses
- Auto-clear input after submitting questions for better UX
- Add dynamic visual indicators (first question vs follow-up)
- Improve response layout with clear separation and hints
- Enable proper chat-like experience for continuous learning

🌟 Additional enhancements:
- Better language-specific messaging throughout interface
- Clearer visual hierarchy between input and response areas
- Intuitive flow that guides users to ask follow-up questions
- Maintains responsive design and accessibility

🔧 Technical changes:
- Enhanced MissionControl state management
- Improved component layout and styling
- Better TypeScript integration across components
- Updated tsconfig for stricter type checking
This commit is contained in:
rwiegand
2025-07-14 12:39:05 +02:00
parent b31492a354
commit f893530471
1798 changed files with 25329 additions and 92638 deletions

View File

@@ -1,10 +1,8 @@
import { APIResource } from "../../core/resource.js";
import { APIResource } from "../../resource.js";
import * as Core from "../../core.js";
import * as TranscriptionsAPI from "./transcriptions.js";
import * as AudioAPI from "./audio.js";
import { APIPromise } from "../../core/api-promise.js";
import { Stream } from "../../core/streaming.js";
import { type Uploadable } from "../../core/uploads.js";
import { RequestOptions } from "../../internal/request-options.js";
import { Stream } from "../../streaming.js";
export declare class Transcriptions extends APIResource {
/**
* Transcribes audio into the input language.
@@ -18,12 +16,12 @@ export declare class Transcriptions extends APIResource {
* });
* ```
*/
create(body: TranscriptionCreateParamsNonStreaming<'json' | undefined>, options?: RequestOptions): APIPromise<Transcription>;
create(body: TranscriptionCreateParamsNonStreaming<'verbose_json'>, options?: RequestOptions): APIPromise<TranscriptionVerbose>;
create(body: TranscriptionCreateParamsNonStreaming<'srt' | 'vtt' | 'text'>, options?: RequestOptions): APIPromise<string>;
create(body: TranscriptionCreateParamsNonStreaming, options?: RequestOptions): APIPromise<Transcription>;
create(body: TranscriptionCreateParamsStreaming, options?: RequestOptions): APIPromise<Stream<TranscriptionStreamEvent>>;
create(body: TranscriptionCreateParamsStreaming, options?: RequestOptions): APIPromise<TranscriptionCreateResponse | string | Stream<TranscriptionStreamEvent>>;
create(body: TranscriptionCreateParamsNonStreaming<'json' | undefined>, options?: Core.RequestOptions): Core.APIPromise<Transcription>;
create(body: TranscriptionCreateParamsNonStreaming<'verbose_json'>, options?: Core.RequestOptions): Core.APIPromise<TranscriptionVerbose>;
create(body: TranscriptionCreateParamsNonStreaming<'srt' | 'vtt' | 'text'>, options?: Core.RequestOptions): Core.APIPromise<string>;
create(body: TranscriptionCreateParamsNonStreaming, options?: Core.RequestOptions): Core.APIPromise<Transcription>;
create(body: TranscriptionCreateParamsStreaming, options?: Core.RequestOptions): Core.APIPromise<Stream<TranscriptionStreamEvent>>;
create(body: TranscriptionCreateParamsStreaming, options?: Core.RequestOptions): Core.APIPromise<TranscriptionCreateResponse | string | Stream<TranscriptionStreamEvent>>;
}
/**
* Represents a transcription response returned by model, based on the provided
@@ -40,10 +38,6 @@ export interface Transcription {
* to the `include` array.
*/
logprobs?: Array<Transcription.Logprob>;
/**
* Token usage statistics for the request.
*/
usage?: Transcription.Tokens | Transcription.Duration;
}
export declare namespace Transcription {
interface Logprob {
@@ -60,59 +54,6 @@ export declare namespace Transcription {
*/
logprob?: number;
}
/**
* Usage statistics for models billed by token usage.
*/
interface Tokens {
/**
* Number of input tokens billed for this request.
*/
input_tokens: number;
/**
* Number of output tokens generated.
*/
output_tokens: number;
/**
* Total number of tokens used (input + output).
*/
total_tokens: number;
/**
* The type of the usage object. Always `tokens` for this variant.
*/
type: 'tokens';
/**
* Details about the input tokens billed for this request.
*/
input_token_details?: Tokens.InputTokenDetails;
}
namespace Tokens {
/**
* Details about the input tokens billed for this request.
*/
interface InputTokenDetails {
/**
* Number of audio tokens billed for this request.
*/
audio_tokens?: number;
/**
* Number of text tokens billed for this request.
*/
text_tokens?: number;
}
}
/**
* Usage statistics for models billed by audio input duration.
*/
interface Duration {
/**
* Duration of the input audio in seconds.
*/
duration: number;
/**
* The type of the usage object. Always `duration` for this variant.
*/
type: 'duration';
}
}
export type TranscriptionInclude = 'logprobs';
export interface TranscriptionSegment {
@@ -198,7 +139,7 @@ export declare namespace TranscriptionTextDeltaEvent {
/**
* The bytes that were used to generate the log probability.
*/
bytes?: Array<number>;
bytes?: Array<unknown>;
/**
* The log probability of the token.
*/
@@ -227,10 +168,6 @@ export interface TranscriptionTextDoneEvent {
* with the `include[]` parameter set to `logprobs`.
*/
logprobs?: Array<TranscriptionTextDoneEvent.Logprob>;
/**
* Usage statistics for models billed by token usage.
*/
usage?: TranscriptionTextDoneEvent.Usage;
}
export declare namespace TranscriptionTextDoneEvent {
interface Logprob {
@@ -241,52 +178,12 @@ export declare namespace TranscriptionTextDoneEvent {
/**
* The bytes that were used to generate the log probability.
*/
bytes?: Array<number>;
bytes?: Array<unknown>;
/**
* The log probability of the token.
*/
logprob?: number;
}
/**
* Usage statistics for models billed by token usage.
*/
interface Usage {
/**
* Number of input tokens billed for this request.
*/
input_tokens: number;
/**
* Number of output tokens generated.
*/
output_tokens: number;
/**
* Total number of tokens used (input + output).
*/
total_tokens: number;
/**
* The type of the usage object. Always `tokens` for this variant.
*/
type: 'tokens';
/**
* Details about the input tokens billed for this request.
*/
input_token_details?: Usage.InputTokenDetails;
}
namespace Usage {
/**
* Details about the input tokens billed for this request.
*/
interface InputTokenDetails {
/**
* Number of audio tokens billed for this request.
*/
audio_tokens?: number;
/**
* Number of text tokens billed for this request.
*/
text_tokens?: number;
}
}
}
/**
* Represents a verbose json transcription response returned by model, based on the
@@ -309,30 +206,11 @@ export interface TranscriptionVerbose {
* Segments of the transcribed text and their corresponding details.
*/
segments?: Array<TranscriptionSegment>;
/**
* Usage statistics for models billed by audio input duration.
*/
usage?: TranscriptionVerbose.Usage;
/**
* Extracted words and their corresponding timestamps.
*/
words?: Array<TranscriptionWord>;
}
export declare namespace TranscriptionVerbose {
/**
* Usage statistics for models billed by audio input duration.
*/
interface Usage {
/**
* Duration of the input audio in seconds.
*/
duration: number;
/**
* The type of the usage object. Always `duration` for this variant.
*/
type: 'duration';
}
}
export interface TranscriptionWord {
/**
* End time of the word in seconds.
@@ -358,7 +236,7 @@ export interface TranscriptionCreateParamsBase<ResponseFormat extends AudioAPI.A
* The audio file object (not file name) to transcribe, in one of these formats:
* flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
*/
file: Uploadable;
file: Core.Uploadable;
/**
* ID of the model to use. The options are `gpt-4o-transcribe`,
* `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source