Fix chat interface - restore continuous conversation flow

🎯 Major improvements to the MissionControl component (sketched below):
- Always keep input field visible and functional after AI responses
- Auto-clear input after submitting questions for better UX
- Add dynamic visual indicators (first question vs follow-up)
- Improve response layout with clear separation and hints
- Enable proper chat-like experience for continuous learning
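
A minimal sketch of the intended flow, assuming a React function component. The `ask` prop, the `Exchange` type, and the handler names are placeholders for illustration, not the actual MissionControl implementation:

```tsx
import { useState } from "react";

// Hypothetical shape of one question/answer exchange; the real types in
// MissionControl are not visible in this commit.
interface Exchange {
  question: string;
  answer: string;
}

interface MissionControlProps {
  // Placeholder for whatever service actually answers the question.
  ask: (question: string) => Promise<string>;
}

export function MissionControl({ ask }: MissionControlProps) {
  const [input, setInput] = useState("");
  const [exchanges, setExchanges] = useState<Exchange[]>([]);

  // Dynamic indicator: first question vs. follow-up.
  const isFollowUp = exchanges.length > 0;

  async function handleSubmit() {
    const question = input.trim();
    if (!question) return;
    setInput(""); // auto-clear the field as soon as the question is submitted
    const answer = await ask(question);
    setExchanges((prev) => [...prev, { question, answer }]);
  }

  return (
    <div>
      {/* Previous responses stay on screen, clearly separated from the input */}
      {exchanges.map((e, i) => (
        <section key={i}>
          <p><strong>You:</strong> {e.question}</p>
          <p>{e.answer}</p>
        </section>
      ))}

      {/* The input stays rendered after every response, so the conversation never dead-ends */}
      <label>
        {isFollowUp ? "Ask a follow-up question" : "Ask your first question"}
        <input
          value={input}
          onChange={(e) => setInput(e.target.value)}
          onKeyDown={(e) => e.key === "Enter" && void handleSubmit()}
        />
      </label>
      <button onClick={() => void handleSubmit()}>Send</button>
    </div>
  );
}
```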

🌟 Additional enhancements:
- Better language-specific messaging throughout the interface
- Clearer visual hierarchy between input and response areas
- Intuitive flow that guides users to ask follow-up questions
- Maintains responsive design and accessibility

🔧 Technical changes:
- Enhanced MissionControl state management
- Improved component layout and styling
- Better TypeScript integration across components
- Updated tsconfig for stricter type checking (see the config sketch below)
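
The exact compiler options tightened are not visible in this view; a plausible tsconfig.json sketch of what "stricter type checking" typically means (each flag below is an assumption, not a confirmed change):

```jsonc
{
  "compilerOptions": {
    "strict": true,                      // umbrella flag: noImplicitAny, strictNullChecks, ...
    "noUncheckedIndexedAccess": true,    // indexed reads are typed as possibly undefined
    "noImplicitOverride": true,          // overriding class members must be marked `override`
    "noFallthroughCasesInSwitch": true   // reports switch cases that fall through
  }
}
```
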
Author: rwiegand
Date: 2025-07-14 12:39:05 +02:00
Parent: b31492a354
Commit: f893530471
1798 changed files with 25329 additions and 92638 deletions

@@ -1,7 +1,5 @@
-import { APIResource } from "../core/resource.js";
-import { APIPromise } from "../core/api-promise.js";
-import { type Uploadable } from "../core/uploads.js";
-import { RequestOptions } from "../internal/request-options.js";
+import { APIResource } from "../resource.js";
+import * as Core from "../core.js";
export declare class Images extends APIResource {
/**
* Creates a variation of a given image. This endpoint only supports `dall-e-2`.
@@ -13,7 +11,7 @@ export declare class Images extends APIResource {
* });
* ```
*/
-createVariation(body: ImageCreateVariationParams, options?: RequestOptions): APIPromise<ImagesResponse>;
+createVariation(body: ImageCreateVariationParams, options?: Core.RequestOptions): Core.APIPromise<ImagesResponse>;
/**
* Creates an edited or extended image given one or more source images and a
* prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`.
@@ -26,7 +24,7 @@ export declare class Images extends APIResource {
* });
* ```
*/
-edit(body: ImageEditParams, options?: RequestOptions): APIPromise<ImagesResponse>;
+edit(body: ImageEditParams, options?: Core.RequestOptions): Core.APIPromise<ImagesResponse>;
/**
* Creates an image given a prompt.
* [Learn more](https://platform.openai.com/docs/guides/images).
@@ -38,7 +36,7 @@ export declare class Images extends APIResource {
* });
* ```
*/
-generate(body: ImageGenerateParams, options?: RequestOptions): APIPromise<ImagesResponse>;
+generate(body: ImageGenerateParams, options?: Core.RequestOptions): Core.APIPromise<ImagesResponse>;
}
/**
* Represents the content or the URL of an image generated by the OpenAI API.
@@ -70,28 +68,10 @@ export interface ImagesResponse {
* The Unix timestamp (in seconds) of when the image was created.
*/
created: number;
-/**
- * The background parameter used for the image generation. Either `transparent` or
- * `opaque`.
- */
-background?: 'transparent' | 'opaque';
/**
* The list of generated images.
*/
data?: Array<Image>;
-/**
- * The output format of the image generation. Either `png`, `webp`, or `jpeg`.
- */
-output_format?: 'png' | 'webp' | 'jpeg';
-/**
- * The quality of the image generated. Either `low`, `medium`, or `high`.
- */
-quality?: 'low' | 'medium' | 'high';
-/**
- * The size of the image generated. Either `1024x1024`, `1024x1536`, or
- * `1536x1024`.
- */
-size?: '1024x1024' | '1024x1536' | '1536x1024';
/**
* For `gpt-image-1` only, the token usage information for the image generation.
*/
@@ -140,7 +120,7 @@ export interface ImageCreateVariationParams {
* The image to use as the basis for the variation(s). Must be a valid PNG file,
* less than 4MB, and square.
*/
-image: Uploadable;
+image: Core.Uploadable;
/**
* The model to use for image generation. Only `dall-e-2` is supported at this
* time.
@@ -173,12 +153,12 @@ export interface ImageEditParams {
* The image(s) to edit. Must be a supported image file or an array of images.
*
* For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
- * 50MB. You can provide up to 16 images.
+ * 25MB. You can provide up to 16 images.
*
* For `dall-e-2`, you can only provide one image, and it should be a square `png`
* file less than 4MB.
*/
-image: Uploadable | Array<Uploadable>;
+image: Core.Uploadable | Array<Core.Uploadable>;
/**
* A text description of the desired image(s). The maximum length is 1000
* characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
@@ -200,7 +180,7 @@ export interface ImageEditParams {
* the mask will be applied on the first image. Must be a valid PNG file, less than
* 4MB, and have the same dimensions as `image`.
*/
-mask?: Uploadable;
+mask?: Core.Uploadable;
/**
* The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
* supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
@@ -211,18 +191,6 @@ export interface ImageEditParams {
* The number of images to generate. Must be between 1 and 10.
*/
n?: number | null;
-/**
- * The compression level (0-100%) for the generated images. This parameter is only
- * supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
- * defaults to 100.
- */
-output_compression?: number | null;
-/**
- * The format in which the generated images are returned. This parameter is only
- * supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
- * default value is `png`.
- */
-output_format?: 'png' | 'jpeg' | 'webp' | null;
/**
* The quality of the image that will be generated. `high`, `medium` and `low` are
* only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.