Files
kidsai/node_modules/openai/resources/graders/grader-models.d.ts
rwiegand f893530471 Fix chat interface - restore continuous conversation flow
🎯 Major improvements to MissionControl component:
- Always keep input field visible and functional after AI responses
- Auto-clear input after submitting questions for better UX
- Add dynamic visual indicators (first question vs follow-up)
- Improve response layout with clear separation and hints
- Enable proper chat-like experience for continuous learning

🌟 Additional enhancements:
- Better language-specific messaging throughout interface
- Clearer visual hierarchy between input and response areas
- Intuitive flow that guides users to ask follow-up questions
- Maintains responsive design and accessibility

🔧 Technical changes:
- Enhanced MissionControl state management
- Improved component layout and styling
- Better TypeScript integration across components
- Updated tsconfig for stricter type checking
2025-07-14 12:39:05 +02:00

234 lines
6.6 KiB
TypeScript

import { APIResource } from "../../resource.js";
import * as ResponsesAPI from "../responses/responses.js";
/**
 * Declaration-only stub for the grader-models API resource. It currently
 * exposes no methods of its own; it exists so the grader model types below
 * can be attached to the client as an `APIResource`.
 */
export declare class GraderModels extends APIResource {
}
/**
 * A LabelModelGrader object which uses a model to assign labels to each item in
 * the evaluation.
 */
export interface LabelModelGrader {
/**
 * The input messages sent to the grading model. Per the `Input` type, message
 * content may contain template strings.
 */
input: Array<LabelModelGrader.Input>;
/**
 * The labels to assign to each item in the evaluation.
 */
labels: Array<string>;
/**
 * The model to use for the evaluation. Must support structured outputs.
 */
model: string;
/**
 * The name of the grader.
 */
name: string;
/**
 * The labels that indicate a passing result. Must be a subset of labels.
 */
passing_labels: Array<string>;
/**
 * The object type, which is always `label_model`.
 */
type: 'label_model';
}
export declare namespace LabelModelGrader {
/**
 * A message input to the model with a role indicating instruction following
 * hierarchy. Instructions given with the `developer` or `system` role take
 * precedence over instructions given with the `user` role. Messages with the
 * `assistant` role are presumed to have been generated by the model in previous
 * interactions.
 */
interface Input {
/**
 * Text inputs to the model - can contain template strings.
 */
content: string | ResponsesAPI.ResponseInputText | Input.OutputText;
/**
 * The role of the message input. One of `user`, `assistant`, `system`, or
 * `developer`.
 */
role: 'user' | 'assistant' | 'system' | 'developer';
/**
 * The type of the message input. Always `message`.
 */
type?: 'message';
}
/**
 * Holds content variants usable inside `Input.content` beyond plain strings
 * and `ResponseInputText`.
 */
namespace Input {
/**
 * A text output from the model.
 */
interface OutputText {
/**
 * The text output from the model.
 */
text: string;
/**
 * The type of the output text. Always `output_text`.
 */
type: 'output_text';
}
}
}
/**
 * A MultiGrader object combines the output of multiple graders to produce a single
 * score.
 */
export interface MultiGrader {
/**
 * A formula to calculate the output based on grader results.
 */
calculate_output: string;
/**
 * The sub-graders to run, keyed by an identifier string. Presumably these keys
 * are what the `calculate_output` formula references — TODO confirm against
 * the API documentation. Note that `MultiGrader` itself is not a valid value
 * here, so multi-graders cannot be nested.
 */
graders: Record<string, StringCheckGrader | TextSimilarityGrader | PythonGrader | ScoreModelGrader | LabelModelGrader>;
/**
 * The name of the grader.
 */
name: string;
/**
 * The object type, which is always `multi`.
 */
type: 'multi';
}
/**
 * A PythonGrader object that runs a python script on the input.
 */
export interface PythonGrader {
/**
 * The name of the grader.
 */
name: string;
/**
 * The source code of the python script.
 */
source: string;
/**
 * The object type, which is always `python`.
 */
type: 'python';
/**
 * The image tag to use for the python script. Optional; the default image
 * used when omitted is server-defined and not specified in this declaration —
 * consult the API documentation.
 */
image_tag?: string;
}
/**
 * A ScoreModelGrader object that uses a model to assign a score to the input.
 */
export interface ScoreModelGrader {
/**
 * The input text. This may include template strings.
 */
input: Array<ScoreModelGrader.Input>;
/**
 * The model to use for the evaluation.
 */
model: string;
/**
 * The name of the grader.
 */
name: string;
/**
 * The object type, which is always `score_model`.
 */
type: 'score_model';
/**
 * The range of the score. Defaults to `[0, 1]`. Presumably a two-element
 * `[min, max]` pair — the element count is not enforced by this type.
 */
range?: Array<number>;
/**
 * The sampling parameters for the model. Deliberately typed `unknown`: the
 * accepted shape (e.g. temperature/top_p) is not specified in this
 * declaration — consult the API documentation before constructing a value.
 */
sampling_params?: unknown;
}
export declare namespace ScoreModelGrader {
/**
 * A message input to the model with a role indicating instruction following
 * hierarchy. Instructions given with the `developer` or `system` role take
 * precedence over instructions given with the `user` role. Messages with the
 * `assistant` role are presumed to have been generated by the model in previous
 * interactions.
 */
interface Input {
/**
 * Text inputs to the model - can contain template strings.
 */
content: string | ResponsesAPI.ResponseInputText | Input.OutputText;
/**
 * The role of the message input. One of `user`, `assistant`, `system`, or
 * `developer`.
 */
role: 'user' | 'assistant' | 'system' | 'developer';
/**
 * The type of the message input. Always `message`.
 */
type?: 'message';
}
/**
 * Holds content variants usable inside `Input.content` beyond plain strings
 * and `ResponseInputText`. Structurally identical to
 * `LabelModelGrader.Input` — declared separately, presumably by the code
 * generator.
 */
namespace Input {
/**
 * A text output from the model.
 */
interface OutputText {
/**
 * The text output from the model.
 */
text: string;
/**
 * The type of the output text. Always `output_text`.
 */
type: 'output_text';
}
}
}
/**
 * A StringCheckGrader object that performs a string comparison between input and
 * reference using a specified operation.
 */
export interface StringCheckGrader {
/**
 * The input text. This may include template strings.
 */
input: string;
/**
 * The name of the grader.
 */
name: string;
/**
 * The string check operation to perform. One of `eq`, `ne`, `like`, or `ilike`.
 * NOTE(review): by SQL convention `ilike` is the case-insensitive variant of
 * `like`, but the exact matching semantics are not specified here — confirm
 * against the API documentation.
 */
operation: 'eq' | 'ne' | 'like' | 'ilike';
/**
 * The reference text. This may include template strings.
 */
reference: string;
/**
 * The object type, which is always `string_check`.
 */
type: 'string_check';
}
/**
 * A TextSimilarityGrader object which grades text based on similarity metrics.
 */
export interface TextSimilarityGrader {
/**
 * The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`,
 * `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`.
 */
evaluation_metric: 'fuzzy_match' | 'bleu' | 'gleu' | 'meteor' | 'rouge_1' | 'rouge_2' | 'rouge_3' | 'rouge_4' | 'rouge_5' | 'rouge_l';
/**
 * The text being graded.
 */
input: string;
/**
 * The name of the grader.
 */
name: string;
/**
 * The text being graded against.
 */
reference: string;
/**
 * The object type, which is always `text_similarity`.
 */
type: 'text_similarity';
}
/**
 * Re-exports the grader model types under the `GraderModels` resource so
 * callers can reference them as `GraderModels.LabelModelGrader` etc. Type-only
 * (`export type`), so nothing is emitted at runtime.
 */
export declare namespace GraderModels {
export { type LabelModelGrader as LabelModelGrader, type MultiGrader as MultiGrader, type PythonGrader as PythonGrader, type ScoreModelGrader as ScoreModelGrader, type StringCheckGrader as StringCheckGrader, type TextSimilarityGrader as TextSimilarityGrader, };
}
//# sourceMappingURL=grader-models.d.ts.map