🎯 Major improvements to MissionControl component: - Always keep input field visible and functional after AI responses - Auto-clear input after submitting questions for better UX - Add dynamic visual indicators (first question vs follow-up) - Improve response layout with clear separation and hints - Enable proper chat-like experience for continuous learning 🌟 Additional enhancements: - Better language-specific messaging throughout interface - Clearer visual hierarchy between input and response areas - Intuitive flow that guides users to ask follow-up questions - Maintains responsive design and accessibility 🔧 Technical changes: - Enhanced MissionControl state management - Improved component layout and styling - Better TypeScript integration across components - Updated tsconfig for stricter type checking
53 lines
2.0 KiB
JavaScript
53 lines
2.0 KiB
JavaScript
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
|
import { APIResource } from "../../resource.mjs";
|
|
import { isRequestOptions } from "../../core.mjs";
|
|
import * as RunsAPI from "./runs/runs.mjs";
|
|
import { RunListResponsesPage, Runs, } from "./runs/runs.mjs";
|
|
import { CursorPage } from "../../pagination.mjs";
|
|
export class Evals extends APIResource {
    constructor() {
        // Forward whatever the base resource constructor expects.
        super(...arguments);
        // Sub-resource client for runs belonging to an individual eval.
        this.runs = new RunsAPI.Runs(this._client);
    }
    /**
     * Create the structure of an evaluation that can be used to test a model's
     * performance. An evaluation is a set of testing criteria and the config for a
     * data source, which dictates the schema of the data used in the evaluation. After
     * creating an evaluation, you can run it on different models and model parameters.
     * We support several types of graders and datasources. For more information, see
     * the [Evals guide](https://platform.openai.com/docs/guides/evals).
     */
    create(body, options) {
        const requestOptions = { body, ...options };
        return this._client.post('/evals', requestOptions);
    }
    /**
     * Get an evaluation by ID.
     */
    retrieve(evalId, options) {
        const path = `/evals/${evalId}`;
        return this._client.get(path, options);
    }
    /**
     * Update certain properties of an evaluation.
     */
    update(evalId, body, options) {
        const path = `/evals/${evalId}`;
        return this._client.post(path, { body, ...options });
    }
    /**
     * List evaluations for a project. The query argument may be omitted, in which
     * case a bare RequestOptions object is accepted in its place.
     */
    list(query = {}, options) {
        // Callers may pass RequestOptions as the first argument; re-dispatch
        // with an empty query so both call shapes behave identically.
        return isRequestOptions(query)
            ? this.list({}, query)
            : this._client.getAPIList('/evals', EvalListResponsesPage, { query, ...options });
    }
    /**
     * Delete an evaluation.
     */
    del(evalId, options) {
        const path = `/evals/${evalId}`;
        return this._client.delete(path, options);
    }
}
|
|
/**
 * Cursor-paginated page of results returned by `Evals.list`; all pagination
 * behavior is inherited unchanged from `CursorPage`.
 */
export class EvalListResponsesPage extends CursorPage {
}
|
|
// Attach related classes as static members of Evals so consumers can reach
// them via the resource (e.g. `Evals.Runs`) without extra imports.
Object.assign(Evals, {
    EvalListResponsesPage,
    Runs,
    RunListResponsesPage,
});
//# sourceMappingURL=evals.mjs.map
|