✅ Fix chat interface - restore continuous conversation flow
🎯 Major improvements to MissionControl component:
- Always keep input field visible and functional after AI responses
- Auto-clear input after submitting questions for better UX
- Add dynamic visual indicators (first question vs follow-up)
- Improve response layout with clear separation and hints
- Enable proper chat-like experience for continuous learning

🌟 Additional enhancements:
- Better language-specific messaging throughout interface
- Clearer visual hierarchy between input and response areas
- Intuitive flow that guides users to ask follow-up questions
- Maintains responsive design and accessibility

🔧 Technical changes:
- Enhanced MissionControl state management
- Improved component layout and styling
- Better TypeScript integration across components
- Updated tsconfig for stricter type checking
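The MissionControl source itself is not shown in this commit view (only the vendored OpenAI typings below changed in this file), so the following is a minimal sketch of the behavior the message describes, assuming a React function component; the `ask` prop, `Message` type, and handler names are illustrative, not the actual implementation.

import { useState, type FormEvent } from "react";

type Message = { role: "user" | "assistant"; text: string };

// Hypothetical props: the real component's API may differ.
export function MissionControl({ ask }: { ask: (q: string) => Promise<string> }) {
  const [question, setQuestion] = useState("");
  const [messages, setMessages] = useState<Message[]>([]);

  async function handleSubmit(e: FormEvent) {
    e.preventDefault();
    const q = question.trim();
    if (!q) return;
    setQuestion(""); // auto-clear the input after submitting
    setMessages((prev) => [...prev, { role: "user", text: q }]);
    const answer = await ask(q);
    setMessages((prev) => [...prev, { role: "assistant", text: answer }]);
  }

  // Dynamic indicator: first question vs follow-up.
  const isFollowUp = messages.length > 0;

  return (
    <form onSubmit={handleSubmit}>
      {messages.map((m, i) => (
        <p key={i} className={m.role}>{m.text}</p>
      ))}
      {/* The input stays mounted after every response, enabling continuous conversation. */}
      <input
        value={question}
        onChange={(e) => setQuestion(e.target.value)}
        placeholder={isFollowUp ? "Ask a follow-up question…" : "Ask your first question…"}
      />
      <button type="submit">Send</button>
    </form>
  );
}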
node_modules/openai/resources/beta/realtime/realtime.d.ts (generated, vendored; 50 changed lines)
--- a/node_modules/openai/resources/beta/realtime/realtime.d.ts
+++ b/node_modules/openai/resources/beta/realtime/realtime.d.ts
@@ -1,4 +1,4 @@
-import { APIResource } from "../../../core/resource.js";
+import { APIResource } from "../../../resource.js";
 import * as RealtimeAPI from "./realtime.js";
 import * as Shared from "../../shared.js";
 import * as SessionsAPI from "./sessions.js";
@@ -231,9 +231,9 @@ export interface ConversationItemDeletedEvent {
  * the Response events.
  *
  * Realtime API models accept audio natively, and thus input transcription is a
- * separate process run on a separate ASR (Automatic Speech Recognition) model. The
- * transcript may diverge somewhat from the model's interpretation, and should be
- * treated as a rough guide.
+ * separate process run on a separate ASR (Automatic Speech Recognition) model,
+ * currently always `whisper-1`. Thus the transcript may diverge somewhat from the
+ * model's interpretation, and should be treated as a rough guide.
  */
 export interface ConversationItemInputAudioTranscriptionCompletedEvent {
   /**
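For context on the doc change above: both wordings describe input transcription as a separate ASR pass whose output is only a rough guide. A minimal consumer might look like the sketch below; it assumes server events arrive as parsed JSON matching the interfaces in this file (the field names `type`, `item_id`, and `transcript` come from the Realtime API's `conversation.item.input_audio_transcription.completed` event), and `showCaption` is a hypothetical UI helper.

// Sketch: treat the ASR transcript as a display hint, not ground truth.
type TranscriptionCompleted = {
  type: 'conversation.item.input_audio_transcription.completed';
  item_id: string;
  content_index: number;
  transcript: string;
};

function handleServerEvent(
  event: { type: string },
  showCaption: (itemId: string, text: string) => void,
): void {
  if (event.type === 'conversation.item.input_audio_transcription.completed') {
    const e = event as TranscriptionCompleted;
    // The transcript may diverge from the model's own interpretation of the audio,
    // so use it for captions/logging rather than for program logic.
    showCaption(e.item_id, e.transcript);
  }
}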
@@ -1783,18 +1783,12 @@ export declare namespace SessionUpdateEvent {
   /**
    * The Realtime model used for this session.
    */
-  model?: 'gpt-4o-realtime-preview' | 'gpt-4o-realtime-preview-2024-10-01' | 'gpt-4o-realtime-preview-2024-12-17' | 'gpt-4o-realtime-preview-2025-06-03' | 'gpt-4o-mini-realtime-preview' | 'gpt-4o-mini-realtime-preview-2024-12-17';
+  model?: 'gpt-4o-realtime-preview' | 'gpt-4o-realtime-preview-2024-10-01' | 'gpt-4o-realtime-preview-2024-12-17' | 'gpt-4o-mini-realtime-preview' | 'gpt-4o-mini-realtime-preview-2024-12-17';
   /**
    * The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
    * For `pcm16`, output audio is sampled at a rate of 24kHz.
    */
   output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';
-  /**
-   * The speed of the model's spoken response. 1.0 is the default speed. 0.25 is the
-   * minimum speed. 1.5 is the maximum speed. This value can only be changed in
-   * between model turns, not while a response is in progress.
-   */
-  speed?: number;
   /**
    * Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a
    * temperature of 0.8 is highly recommended for best performance.
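The fields touched in this hunk all belong to the session configuration sent with a `session.update` event. A sketch of such a payload, using only values documented in the comments above (the client or transport used to send it is out of scope here and assumed):

// Sketch of a session.update body built from the fields shown in this hunk.
const sessionUpdate = {
  type: 'session.update' as const,
  session: {
    model: 'gpt-4o-realtime-preview-2024-12-17', // a union member kept by this change
    output_audio_format: 'pcm16' as const,       // pcm16 output is sampled at 24kHz
    temperature: 0.8,                            // within the documented [0.6, 1.2] range
  },
};
// The object would then be serialized onto the realtime connection,
// e.g. webSocket.send(JSON.stringify(sessionUpdate)).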
@@ -1809,14 +1803,6 @@ export declare namespace SessionUpdateEvent {
    * Tools (functions) available to the model.
    */
   tools?: Array<Session.Tool>;
-  /**
-   * Configuration options for tracing. Set to null to disable tracing. Once tracing
-   * is enabled for a session, the configuration cannot be modified.
-   *
-   * `auto` will create a trace for the session with default values for the workflow
-   * name, group id, and metadata.
-   */
-  tracing?: 'auto' | Session.TracingConfiguration;
   /**
    * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be
    * set to `null` to turn off, in which case the client must manually trigger model
@@ -1846,18 +1832,18 @@ export declare namespace SessionUpdateEvent {
   /**
    * Configuration for the ephemeral token expiration.
    */
-  expires_after?: ClientSecret.ExpiresAfter;
+  expires_at?: ClientSecret.ExpiresAt;
 }
 namespace ClientSecret {
   /**
    * Configuration for the ephemeral token expiration.
    */
-  interface ExpiresAfter {
+  interface ExpiresAt {
   /**
    * The anchor point for the ephemeral token expiration. Only `created_at` is
    * currently supported.
    */
-  anchor: 'created_at';
+  anchor?: 'created_at';
   /**
    * The number of seconds from the anchor point to the expiration. Select a value
    * between `10` and `7200`.
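For the client-secret change above, the post-change shape keys the expiration under `expires_at` with an optional `anchor`. A hedged sketch of that configuration follows; the `seconds` field name is an assumption based on the "number of seconds from the anchor point" comment and is not shown in this hunk.

// Sketch: ephemeral token expiration matching the post-change ExpiresAt shape.
const clientSecretConfig = {
  expires_at: {
    anchor: 'created_at' as const, // optional after this change; only 'created_at' is supported
    seconds: 600,                  // must fall in the documented 10-7200 range (field name assumed)
  },
};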
@@ -1930,26 +1916,6 @@ export declare namespace SessionUpdateEvent {
    */
   type?: 'function';
 }
-/**
- * Granular configuration for tracing.
- */
-interface TracingConfiguration {
-  /**
-   * The group id to attach to this trace to enable filtering and grouping in the
-   * traces dashboard.
-   */
-  group_id?: string;
-  /**
-   * The arbitrary metadata to attach to this trace to enable filtering in the traces
-   * dashboard.
-   */
-  metadata?: unknown;
-  /**
-   * The name of the workflow to attach to this trace. This is used to name the trace
-   * in the traces dashboard.
-   */
-  workflow_name?: string;
-}
 /**
  * Configuration for turn detection, ether Server VAD or Semantic VAD. This can be
  * set to `null` to turn off, in which case the client must manually trigger model
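The unchanged context lines closing this hunk note that `turn_detection` can be set to `null`, in which case the client must trigger model responses manually. A minimal sketch of that mode, assuming a `send` helper that serializes events onto the realtime connection (the `session.update` and `response.create` event types are standard Realtime API client events):

// Sketch: disable server-side turn detection and drive turns manually.
declare function send(event: object): void; // assumed transport helper, e.g. ws.send(JSON.stringify(event))

send({ type: 'session.update', session: { turn_detection: null } });

// ...append user audio or conversation items here, then explicitly request a response:
send({ type: 'response.create' });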