Initial commit: KidsAI Explorer with complete functionality
- Complete KidsAI Explorer application
- Multi-language support (English/German)
- AI-powered educational guidance using OpenAI
- Interactive chat interface for children
- Proper placeholder translation fixes
- Mobile-responsive design
- Educational framework for critical thinking
node_modules/openai/resources/beta/realtime/index.d.mts (+4 lines, generated, vendored, new file)
@@ -0,0 +1,4 @@
export { Realtime } from "./realtime.mjs";
export { Sessions, type Session, type SessionCreateResponse, type SessionCreateParams } from "./sessions.mjs";
export { TranscriptionSessions, type TranscriptionSession, type TranscriptionSessionCreateParams, } from "./transcription-sessions.mjs";
//# sourceMappingURL=index.d.mts.map
node_modules/openai/resources/beta/realtime/index.d.mts.map (+1 line, generated, vendored, new file)
@@ -0,0 +1 @@
{"version":3,"file":"index.d.mts","sourceRoot":"","sources":["../../../src/resources/beta/realtime/index.ts"],"names":[],"mappings":"OAEO,EAAE,QAAQ,EAAE;OACZ,EAAE,QAAQ,EAAE,KAAK,OAAO,EAAE,KAAK,qBAAqB,EAAE,KAAK,mBAAmB,EAAE;OAChF,EACL,qBAAqB,EACrB,KAAK,oBAAoB,EACzB,KAAK,gCAAgC,GACtC"}
node_modules/openai/resources/beta/realtime/index.d.ts (+4 lines, generated, vendored, new file)
@@ -0,0 +1,4 @@
export { Realtime } from "./realtime.js";
export { Sessions, type Session, type SessionCreateResponse, type SessionCreateParams } from "./sessions.js";
export { TranscriptionSessions, type TranscriptionSession, type TranscriptionSessionCreateParams, } from "./transcription-sessions.js";
//# sourceMappingURL=index.d.ts.map
node_modules/openai/resources/beta/realtime/index.d.ts.map (+1 line, generated, vendored, new file)
@@ -0,0 +1 @@
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/resources/beta/realtime/index.ts"],"names":[],"mappings":"OAEO,EAAE,QAAQ,EAAE;OACZ,EAAE,QAAQ,EAAE,KAAK,OAAO,EAAE,KAAK,qBAAqB,EAAE,KAAK,mBAAmB,EAAE;OAChF,EACL,qBAAqB,EACrB,KAAK,oBAAoB,EACzB,KAAK,gCAAgC,GACtC"}
node_modules/openai/resources/beta/realtime/index.js (+11 lines, generated, vendored, new file)
@@ -0,0 +1,11 @@
"use strict";
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
Object.defineProperty(exports, "__esModule", { value: true });
exports.TranscriptionSessions = exports.Sessions = exports.Realtime = void 0;
var realtime_1 = require("./realtime.js");
Object.defineProperty(exports, "Realtime", { enumerable: true, get: function () { return realtime_1.Realtime; } });
var sessions_1 = require("./sessions.js");
Object.defineProperty(exports, "Sessions", { enumerable: true, get: function () { return sessions_1.Sessions; } });
var transcription_sessions_1 = require("./transcription-sessions.js");
Object.defineProperty(exports, "TranscriptionSessions", { enumerable: true, get: function () { return transcription_sessions_1.TranscriptionSessions; } });
//# sourceMappingURL=index.js.map
node_modules/openai/resources/beta/realtime/index.js.map (+1 line, generated, vendored, new file)
@@ -0,0 +1 @@
{"version":3,"file":"index.js","sourceRoot":"","sources":["../../../src/resources/beta/realtime/index.ts"],"names":[],"mappings":";AAAA,sFAAsF;;;AAEtF,0CAAsC;AAA7B,oGAAA,QAAQ,OAAA;AACjB,0CAA0G;AAAjG,oGAAA,QAAQ,OAAA;AACjB,sEAIkC;AAHhC,+HAAA,qBAAqB,OAAA"}
node_modules/openai/resources/beta/realtime/index.mjs (+5 lines, generated, vendored, new file)
@@ -0,0 +1,5 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
export { Realtime } from "./realtime.mjs";
export { Sessions } from "./sessions.mjs";
export { TranscriptionSessions, } from "./transcription-sessions.mjs";
//# sourceMappingURL=index.mjs.map
node_modules/openai/resources/beta/realtime/index.mjs.map (+1 line, generated, vendored, new file)
@@ -0,0 +1 @@
{"version":3,"file":"index.mjs","sourceRoot":"","sources":["../../../src/resources/beta/realtime/index.ts"],"names":[],"mappings":"AAAA,sFAAsF;OAE/E,EAAE,QAAQ,EAAE;OACZ,EAAE,QAAQ,EAAsE;OAChF,EACL,qBAAqB,GAGtB"}
node_modules/openai/resources/beta/realtime/realtime.d.mts (+2245 lines, generated, vendored, new file)
File diff suppressed because it is too large
node_modules/openai/resources/beta/realtime/realtime.d.mts.map (+1 line, generated, vendored, new file)
File diff suppressed because one or more lines are too long
node_modules/openai/resources/beta/realtime/realtime.d.ts (+2245 lines, generated, vendored, new file)
File diff suppressed because it is too large
node_modules/openai/resources/beta/realtime/realtime.d.ts.map (+1 line, generated, vendored, new file)
File diff suppressed because one or more lines are too long
node_modules/openai/resources/beta/realtime/realtime.js (+21 lines, generated, vendored, new file)
@@ -0,0 +1,21 @@
"use strict";
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
Object.defineProperty(exports, "__esModule", { value: true });
exports.Realtime = void 0;
const tslib_1 = require("../../../internal/tslib.js");
const resource_1 = require("../../../core/resource.js");
const SessionsAPI = tslib_1.__importStar(require("./sessions.js"));
const sessions_1 = require("./sessions.js");
const TranscriptionSessionsAPI = tslib_1.__importStar(require("./transcription-sessions.js"));
const transcription_sessions_1 = require("./transcription-sessions.js");
class Realtime extends resource_1.APIResource {
    constructor() {
        super(...arguments);
        this.sessions = new SessionsAPI.Sessions(this._client);
        this.transcriptionSessions = new TranscriptionSessionsAPI.TranscriptionSessions(this._client);
    }
}
exports.Realtime = Realtime;
Realtime.Sessions = sessions_1.Sessions;
Realtime.TranscriptionSessions = transcription_sessions_1.TranscriptionSessions;
//# sourceMappingURL=realtime.js.map
node_modules/openai/resources/beta/realtime/realtime.js.map (+1 line, generated, vendored, new file)
@@ -0,0 +1 @@
{"version":3,"file":"realtime.js","sourceRoot":"","sources":["../../../src/resources/beta/realtime/realtime.ts"],"names":[],"mappings":";AAAA,sFAAsF;;;;AAEtF,wDAAqD;AAGrD,mEAA0C;AAC1C,4CAKoB;AACpB,8FAAqE;AACrE,wEAIkC;AAElC,MAAa,QAAS,SAAQ,sBAAW;IAAzC;;QACE,aAAQ,GAAyB,IAAI,WAAW,CAAC,QAAQ,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;QACxE,0BAAqB,GACnB,IAAI,wBAAwB,CAAC,qBAAqB,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;IACrE,CAAC;CAAA;AAJD,4BAIC;AA6mFD,QAAQ,CAAC,QAAQ,GAAG,mBAAQ,CAAC;AAC7B,QAAQ,CAAC,qBAAqB,GAAG,8CAAqB,CAAC"}
node_modules/openai/resources/beta/realtime/realtime.mjs (+16 lines, generated, vendored, new file)
@@ -0,0 +1,16 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import { APIResource } from "../../../core/resource.mjs";
import * as SessionsAPI from "./sessions.mjs";
import { Sessions, } from "./sessions.mjs";
import * as TranscriptionSessionsAPI from "./transcription-sessions.mjs";
import { TranscriptionSessions, } from "./transcription-sessions.mjs";
export class Realtime extends APIResource {
    constructor() {
        super(...arguments);
        this.sessions = new SessionsAPI.Sessions(this._client);
        this.transcriptionSessions = new TranscriptionSessionsAPI.TranscriptionSessions(this._client);
    }
}
Realtime.Sessions = Sessions;
Realtime.TranscriptionSessions = TranscriptionSessions;
//# sourceMappingURL=realtime.mjs.map
node_modules/openai/resources/beta/realtime/realtime.mjs.map (+1 line, generated, vendored, new file)
@@ -0,0 +1 @@
{"version":3,"file":"realtime.mjs","sourceRoot":"","sources":["../../../src/resources/beta/realtime/realtime.ts"],"names":[],"mappings":"AAAA,sFAAsF;OAE/E,EAAE,WAAW,EAAE;OAGf,KAAK,WAAW;OAChB,EAIL,QAAQ,GACT;OACM,KAAK,wBAAwB;OAC7B,EAGL,qBAAqB,GACtB;AAED,MAAM,OAAO,QAAS,SAAQ,WAAW;IAAzC;;QACE,aAAQ,GAAyB,IAAI,WAAW,CAAC,QAAQ,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;QACxE,0BAAqB,GACnB,IAAI,wBAAwB,CAAC,qBAAqB,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;IACrE,CAAC;CAAA;AA6mFD,QAAQ,CAAC,QAAQ,GAAG,QAAQ,CAAC;AAC7B,QAAQ,CAAC,qBAAqB,GAAG,qBAAqB,CAAC"}
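The realtime.js / realtime.mjs modules above only wire the sessions and transcriptionSessions sub-resources onto the SDK client. A minimal sketch of how those vendored resources are reached from application code, assuming the standard openai package entry point and an OPENAI_API_KEY environment variable (the path through beta matches the JSDoc example in sessions.d.mts below):

// Illustration only: accessor path implied by the vendored Realtime class.
import OpenAI from 'openai';

const client = new OpenAI(); // assumes OPENAI_API_KEY is set in the environment

const realtime = client.beta.realtime;                          // Realtime resource from realtime.mjs
const sessions = realtime.sessions;                             // Sessions sub-resource
const transcriptionSessions = realtime.transcriptionSessions;   // TranscriptionSessions sub-resource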
node_modules/openai/resources/beta/realtime/sessions.d.mts (+746 lines, generated, vendored, new file)
@@ -0,0 +1,746 @@
import { APIResource } from "../../../core/resource.mjs";
|
||||
import { APIPromise } from "../../../core/api-promise.mjs";
|
||||
import { RequestOptions } from "../../../internal/request-options.mjs";
|
||||
export declare class Sessions extends APIResource {
|
||||
/**
|
||||
* Create an ephemeral API token for use in client-side applications with the
|
||||
* Realtime API. Can be configured with the same session parameters as the
|
||||
* `session.update` client event.
|
||||
*
|
||||
* It responds with a session object, plus a `client_secret` key which contains a
|
||||
* usable ephemeral API token that can be used to authenticate browser clients for
|
||||
* the Realtime API.
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* const session =
|
||||
* await client.beta.realtime.sessions.create();
|
||||
* ```
|
||||
*/
|
||||
create(body: SessionCreateParams, options?: RequestOptions): APIPromise<SessionCreateResponse>;
|
||||
}
|
||||
/**
|
||||
* Realtime session object configuration.
|
||||
*/
|
||||
export interface Session {
|
||||
/**
|
||||
* Unique identifier for the session that looks like `sess_1234567890abcdef`.
|
||||
*/
|
||||
id?: string;
|
||||
/**
|
||||
* The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
|
||||
* `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
|
||||
* (mono), and little-endian byte order.
|
||||
*/
|
||||
input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';
|
||||
/**
|
||||
* Configuration for input audio noise reduction. This can be set to `null` to turn
|
||||
* off. Noise reduction filters audio added to the input audio buffer before it is
|
||||
* sent to VAD and the model. Filtering the audio can improve VAD and turn
|
||||
* detection accuracy (reducing false positives) and model performance by improving
|
||||
* perception of the input audio.
|
||||
*/
|
||||
input_audio_noise_reduction?: Session.InputAudioNoiseReduction;
|
||||
/**
|
||||
* Configuration for input audio transcription, defaults to off and can be set to
|
||||
* `null` to turn off once on. Input audio transcription is not native to the
|
||||
* model, since the model consumes audio directly. Transcription runs
|
||||
* asynchronously through
|
||||
* [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
|
||||
* and should be treated as guidance of input audio content rather than precisely
|
||||
* what the model heard. The client can optionally set the language and prompt for
|
||||
* transcription, these offer additional guidance to the transcription service.
|
||||
*/
|
||||
input_audio_transcription?: Session.InputAudioTranscription;
|
||||
/**
|
||||
* The default system instructions (i.e. system message) prepended to model calls.
|
||||
* This field allows the client to guide the model on desired responses. The model
|
||||
* can be instructed on response content and format, (e.g. "be extremely succinct",
|
||||
* "act friendly", "here are examples of good responses") and on audio behavior
|
||||
* (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
|
||||
* instructions are not guaranteed to be followed by the model, but they provide
|
||||
* guidance to the model on the desired behavior.
|
||||
*
|
||||
* Note that the server sets default instructions which will be used if this field
|
||||
* is not set and are visible in the `session.created` event at the start of the
|
||||
* session.
|
||||
*/
|
||||
instructions?: string;
|
||||
/**
|
||||
* Maximum number of output tokens for a single assistant response, inclusive of
|
||||
* tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
|
||||
* `inf` for the maximum available tokens for a given model. Defaults to `inf`.
|
||||
*/
|
||||
max_response_output_tokens?: number | 'inf';
|
||||
/**
|
||||
* The set of modalities the model can respond with. To disable audio, set this to
|
||||
* ["text"].
|
||||
*/
|
||||
modalities?: Array<'text' | 'audio'>;
|
||||
/**
|
||||
* The Realtime model used for this session.
|
||||
*/
|
||||
model?: 'gpt-4o-realtime-preview' | 'gpt-4o-realtime-preview-2024-10-01' | 'gpt-4o-realtime-preview-2024-12-17' | 'gpt-4o-realtime-preview-2025-06-03' | 'gpt-4o-mini-realtime-preview' | 'gpt-4o-mini-realtime-preview-2024-12-17';
|
||||
/**
|
||||
* The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
|
||||
* For `pcm16`, output audio is sampled at a rate of 24kHz.
|
||||
*/
|
||||
output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';
|
||||
/**
|
||||
* The speed of the model's spoken response. 1.0 is the default speed. 0.25 is the
|
||||
* minimum speed. 1.5 is the maximum speed. This value can only be changed in
|
||||
* between model turns, not while a response is in progress.
|
||||
*/
|
||||
speed?: number;
|
||||
/**
|
||||
* Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a
|
||||
* temperature of 0.8 is highly recommended for best performance.
|
||||
*/
|
||||
temperature?: number;
|
||||
/**
|
||||
* How the model chooses tools. Options are `auto`, `none`, `required`, or specify
|
||||
* a function.
|
||||
*/
|
||||
tool_choice?: string;
|
||||
/**
|
||||
* Tools (functions) available to the model.
|
||||
*/
|
||||
tools?: Array<Session.Tool>;
|
||||
/**
|
||||
* Configuration options for tracing. Set to null to disable tracing. Once tracing
|
||||
* is enabled for a session, the configuration cannot be modified.
|
||||
*
|
||||
* `auto` will create a trace for the session with default values for the workflow
|
||||
* name, group id, and metadata.
|
||||
*/
|
||||
tracing?: 'auto' | Session.TracingConfiguration;
|
||||
/**
|
||||
* Configuration for turn detection, either Server VAD or Semantic VAD. This can be
|
||||
* set to `null` to turn off, in which case the client must manually trigger model
|
||||
* response. Server VAD means that the model will detect the start and end of
|
||||
* speech based on audio volume and respond at the end of user speech. Semantic VAD
|
||||
* is more advanced and uses a turn detection model (in conjunction with VAD) to
|
||||
* semantically estimate whether the user has finished speaking, then dynamically
|
||||
* sets a timeout based on this probability. For example, if user audio trails off
|
||||
* with "uhhm", the model will score a low probability of turn end and wait longer
|
||||
* for the user to continue speaking. This can be useful for more natural
|
||||
* conversations, but may have a higher latency.
|
||||
*/
|
||||
turn_detection?: Session.TurnDetection;
|
||||
/**
|
||||
* The voice the model uses to respond. Voice cannot be changed during the session
|
||||
* once the model has responded with audio at least once. Current voice options are
|
||||
* `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
|
||||
* `shimmer`, and `verse`.
|
||||
*/
|
||||
voice?: (string & {}) | 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'fable' | 'onyx' | 'nova' | 'sage' | 'shimmer' | 'verse';
|
||||
}
|
||||
export declare namespace Session {
|
||||
/**
|
||||
* Configuration for input audio noise reduction. This can be set to `null` to turn
|
||||
* off. Noise reduction filters audio added to the input audio buffer before it is
|
||||
* sent to VAD and the model. Filtering the audio can improve VAD and turn
|
||||
* detection accuracy (reducing false positives) and model performance by improving
|
||||
* perception of the input audio.
|
||||
*/
|
||||
interface InputAudioNoiseReduction {
|
||||
/**
|
||||
* Type of noise reduction. `near_field` is for close-talking microphones such as
|
||||
* headphones, `far_field` is for far-field microphones such as laptop or
|
||||
* conference room microphones.
|
||||
*/
|
||||
type?: 'near_field' | 'far_field';
|
||||
}
|
||||
/**
|
||||
* Configuration for input audio transcription, defaults to off and can be set to
|
||||
* `null` to turn off once on. Input audio transcription is not native to the
|
||||
* model, since the model consumes audio directly. Transcription runs
|
||||
* asynchronously through
|
||||
* [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
|
||||
* and should be treated as guidance of input audio content rather than precisely
|
||||
* what the model heard. The client can optionally set the language and prompt for
|
||||
* transcription, these offer additional guidance to the transcription service.
|
||||
*/
|
||||
interface InputAudioTranscription {
|
||||
/**
|
||||
* The language of the input audio. Supplying the input language in
|
||||
* [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
|
||||
* format will improve accuracy and latency.
|
||||
*/
|
||||
language?: string;
|
||||
/**
|
||||
* The model to use for transcription, current options are `gpt-4o-transcribe`,
|
||||
* `gpt-4o-mini-transcribe`, and `whisper-1`.
|
||||
*/
|
||||
model?: string;
|
||||
/**
|
||||
* An optional text to guide the model's style or continue a previous audio
|
||||
* segment. For `whisper-1`, the
|
||||
* [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
|
||||
* For `gpt-4o-transcribe` models, the prompt is a free text string, for example
|
||||
* "expect words related to technology".
|
||||
*/
|
||||
prompt?: string;
|
||||
}
|
||||
interface Tool {
|
||||
/**
|
||||
* The description of the function, including guidance on when and how to call it,
|
||||
* and guidance about what to tell the user when calling (if anything).
|
||||
*/
|
||||
description?: string;
|
||||
/**
|
||||
* The name of the function.
|
||||
*/
|
||||
name?: string;
|
||||
/**
|
||||
* Parameters of the function in JSON Schema.
|
||||
*/
|
||||
parameters?: unknown;
|
||||
/**
|
||||
* The type of the tool, i.e. `function`.
|
||||
*/
|
||||
type?: 'function';
|
||||
}
|
||||
/**
|
||||
* Granular configuration for tracing.
|
||||
*/
|
||||
interface TracingConfiguration {
|
||||
/**
|
||||
* The group id to attach to this trace to enable filtering and grouping in the
|
||||
* traces dashboard.
|
||||
*/
|
||||
group_id?: string;
|
||||
/**
|
||||
* The arbitrary metadata to attach to this trace to enable filtering in the traces
|
||||
* dashboard.
|
||||
*/
|
||||
metadata?: unknown;
|
||||
/**
|
||||
* The name of the workflow to attach to this trace. This is used to name the trace
|
||||
* in the traces dashboard.
|
||||
*/
|
||||
workflow_name?: string;
|
||||
}
|
||||
/**
|
||||
* Configuration for turn detection, either Server VAD or Semantic VAD. This can be
|
||||
* set to `null` to turn off, in which case the client must manually trigger model
|
||||
* response. Server VAD means that the model will detect the start and end of
|
||||
* speech based on audio volume and respond at the end of user speech. Semantic VAD
|
||||
* is more advanced and uses a turn detection model (in conjunction with VAD) to
|
||||
* semantically estimate whether the user has finished speaking, then dynamically
|
||||
* sets a timeout based on this probability. For example, if user audio trails off
|
||||
* with "uhhm", the model will score a low probability of turn end and wait longer
|
||||
* for the user to continue speaking. This can be useful for more natural
|
||||
* conversations, but may have a higher latency.
|
||||
*/
|
||||
interface TurnDetection {
|
||||
/**
|
||||
* Whether or not to automatically generate a response when a VAD stop event
|
||||
* occurs.
|
||||
*/
|
||||
create_response?: boolean;
|
||||
/**
|
||||
* Used only for `semantic_vad` mode. The eagerness of the model to respond. `low`
|
||||
* will wait longer for the user to continue speaking, `high` will respond more
|
||||
* quickly. `auto` is the default and is equivalent to `medium`.
|
||||
*/
|
||||
eagerness?: 'low' | 'medium' | 'high' | 'auto';
|
||||
/**
|
||||
* Whether or not to automatically interrupt any ongoing response with output to
|
||||
* the default conversation (i.e. `conversation` of `auto`) when a VAD start event
|
||||
* occurs.
|
||||
*/
|
||||
interrupt_response?: boolean;
|
||||
/**
|
||||
* Used only for `server_vad` mode. Amount of audio to include before the VAD
|
||||
* detected speech (in milliseconds). Defaults to 300ms.
|
||||
*/
|
||||
prefix_padding_ms?: number;
|
||||
/**
|
||||
* Used only for `server_vad` mode. Duration of silence to detect speech stop (in
|
||||
* milliseconds). Defaults to 500ms. With shorter values the model will respond
|
||||
* more quickly, but may jump in on short pauses from the user.
|
||||
*/
|
||||
silence_duration_ms?: number;
|
||||
/**
|
||||
* Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this
|
||||
* defaults to 0.5. A higher threshold will require louder audio to activate the
|
||||
* model, and thus might perform better in noisy environments.
|
||||
*/
|
||||
threshold?: number;
|
||||
/**
|
||||
* Type of turn detection.
|
||||
*/
|
||||
type?: 'server_vad' | 'semantic_vad';
|
||||
}
|
||||
}
|
||||
/**
|
||||
* A new Realtime session configuration, with an ephemeral key. Default TTL for
|
||||
* keys is one minute.
|
||||
*/
|
||||
export interface SessionCreateResponse {
|
||||
/**
|
||||
* Ephemeral key returned by the API.
|
||||
*/
|
||||
client_secret: SessionCreateResponse.ClientSecret;
|
||||
/**
|
||||
* The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
|
||||
*/
|
||||
input_audio_format?: string;
|
||||
/**
|
||||
* Configuration for input audio transcription, defaults to off and can be set to
|
||||
* `null` to turn off once on. Input audio transcription is not native to the
|
||||
* model, since the model consumes audio directly. Transcription runs
|
||||
* asynchronously and should be treated as rough guidance rather than the
|
||||
* representation understood by the model.
|
||||
*/
|
||||
input_audio_transcription?: SessionCreateResponse.InputAudioTranscription;
|
||||
/**
|
||||
* The default system instructions (i.e. system message) prepended to model calls.
|
||||
* This field allows the client to guide the model on desired responses. The model
|
||||
* can be instructed on response content and format, (e.g. "be extremely succinct",
|
||||
* "act friendly", "here are examples of good responses") and on audio behavior
|
||||
* (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
|
||||
* instructions are not guaranteed to be followed by the model, but they provide
|
||||
* guidance to the model on the desired behavior.
|
||||
*
|
||||
* Note that the server sets default instructions which will be used if this field
|
||||
* is not set and are visible in the `session.created` event at the start of the
|
||||
* session.
|
||||
*/
|
||||
instructions?: string;
|
||||
/**
|
||||
* Maximum number of output tokens for a single assistant response, inclusive of
|
||||
* tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
|
||||
* `inf` for the maximum available tokens for a given model. Defaults to `inf`.
|
||||
*/
|
||||
max_response_output_tokens?: number | 'inf';
|
||||
/**
|
||||
* The set of modalities the model can respond with. To disable audio, set this to
|
||||
* ["text"].
|
||||
*/
|
||||
modalities?: Array<'text' | 'audio'>;
|
||||
/**
|
||||
* The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
|
||||
*/
|
||||
output_audio_format?: string;
|
||||
/**
|
||||
* The speed of the model's spoken response. 1.0 is the default speed. 0.25 is the
|
||||
* minimum speed. 1.5 is the maximum speed. This value can only be changed in
|
||||
* between model turns, not while a response is in progress.
|
||||
*/
|
||||
speed?: number;
|
||||
/**
|
||||
* Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.
|
||||
*/
|
||||
temperature?: number;
|
||||
/**
|
||||
* How the model chooses tools. Options are `auto`, `none`, `required`, or specify
|
||||
* a function.
|
||||
*/
|
||||
tool_choice?: string;
|
||||
/**
|
||||
* Tools (functions) available to the model.
|
||||
*/
|
||||
tools?: Array<SessionCreateResponse.Tool>;
|
||||
/**
|
||||
* Configuration options for tracing. Set to null to disable tracing. Once tracing
|
||||
* is enabled for a session, the configuration cannot be modified.
|
||||
*
|
||||
* `auto` will create a trace for the session with default values for the workflow
|
||||
* name, group id, and metadata.
|
||||
*/
|
||||
tracing?: 'auto' | SessionCreateResponse.TracingConfiguration;
|
||||
/**
|
||||
* Configuration for turn detection. Can be set to `null` to turn off. Server VAD
|
||||
* means that the model will detect the start and end of speech based on audio
|
||||
* volume and respond at the end of user speech.
|
||||
*/
|
||||
turn_detection?: SessionCreateResponse.TurnDetection;
|
||||
/**
|
||||
* The voice the model uses to respond. Voice cannot be changed during the session
|
||||
* once the model has responded with audio at least once. Current voice options are
|
||||
* `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`.
|
||||
*/
|
||||
voice?: (string & {}) | 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'fable' | 'onyx' | 'nova' | 'sage' | 'shimmer' | 'verse';
|
||||
}
|
||||
export declare namespace SessionCreateResponse {
|
||||
/**
|
||||
* Ephemeral key returned by the API.
|
||||
*/
|
||||
interface ClientSecret {
|
||||
/**
|
||||
* Timestamp for when the token expires. Currently, all tokens expire after one
|
||||
* minute.
|
||||
*/
|
||||
expires_at: number;
|
||||
/**
|
||||
* Ephemeral key usable in client environments to authenticate connections to the
|
||||
* Realtime API. Use this in client-side environments rather than a standard API
|
||||
* token, which should only be used server-side.
|
||||
*/
|
||||
value: string;
|
||||
}
|
||||
/**
|
||||
* Configuration for input audio transcription, defaults to off and can be set to
|
||||
* `null` to turn off once on. Input audio transcription is not native to the
|
||||
* model, since the model consumes audio directly. Transcription runs
|
||||
* asynchronously and should be treated as rough guidance rather than the
|
||||
* representation understood by the model.
|
||||
*/
|
||||
interface InputAudioTranscription {
|
||||
/**
|
||||
* The model to use for transcription.
|
||||
*/
|
||||
model?: string;
|
||||
}
|
||||
interface Tool {
|
||||
/**
|
||||
* The description of the function, including guidance on when and how to call it,
|
||||
* and guidance about what to tell the user when calling (if anything).
|
||||
*/
|
||||
description?: string;
|
||||
/**
|
||||
* The name of the function.
|
||||
*/
|
||||
name?: string;
|
||||
/**
|
||||
* Parameters of the function in JSON Schema.
|
||||
*/
|
||||
parameters?: unknown;
|
||||
/**
|
||||
* The type of the tool, i.e. `function`.
|
||||
*/
|
||||
type?: 'function';
|
||||
}
|
||||
/**
|
||||
* Granular configuration for tracing.
|
||||
*/
|
||||
interface TracingConfiguration {
|
||||
/**
|
||||
* The group id to attach to this trace to enable filtering and grouping in the
|
||||
* traces dashboard.
|
||||
*/
|
||||
group_id?: string;
|
||||
/**
|
||||
* The arbitrary metadata to attach to this trace to enable filtering in the traces
|
||||
* dashboard.
|
||||
*/
|
||||
metadata?: unknown;
|
||||
/**
|
||||
* The name of the workflow to attach to this trace. This is used to name the trace
|
||||
* in the traces dashboard.
|
||||
*/
|
||||
workflow_name?: string;
|
||||
}
|
||||
/**
|
||||
* Configuration for turn detection. Can be set to `null` to turn off. Server VAD
|
||||
* means that the model will detect the start and end of speech based on audio
|
||||
* volume and respond at the end of user speech.
|
||||
*/
|
||||
interface TurnDetection {
|
||||
/**
|
||||
* Amount of audio to include before the VAD detected speech (in milliseconds).
|
||||
* Defaults to 300ms.
|
||||
*/
|
||||
prefix_padding_ms?: number;
|
||||
/**
|
||||
* Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
|
||||
* With shorter values the model will respond more quickly, but may jump in on
|
||||
* short pauses from the user.
|
||||
*/
|
||||
silence_duration_ms?: number;
|
||||
/**
|
||||
* Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher
|
||||
* threshold will require louder audio to activate the model, and thus might
|
||||
* perform better in noisy environments.
|
||||
*/
|
||||
threshold?: number;
|
||||
/**
|
||||
* Type of turn detection, only `server_vad` is currently supported.
|
||||
*/
|
||||
type?: string;
|
||||
}
|
||||
}
|
||||
export interface SessionCreateParams {
|
||||
/**
|
||||
* Configuration options for the generated client secret.
|
||||
*/
|
||||
client_secret?: SessionCreateParams.ClientSecret;
|
||||
/**
|
||||
* The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
|
||||
* `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
|
||||
* (mono), and little-endian byte order.
|
||||
*/
|
||||
input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';
|
||||
/**
|
||||
* Configuration for input audio noise reduction. This can be set to `null` to turn
|
||||
* off. Noise reduction filters audio added to the input audio buffer before it is
|
||||
* sent to VAD and the model. Filtering the audio can improve VAD and turn
|
||||
* detection accuracy (reducing false positives) and model performance by improving
|
||||
* perception of the input audio.
|
||||
*/
|
||||
input_audio_noise_reduction?: SessionCreateParams.InputAudioNoiseReduction;
|
||||
/**
|
||||
* Configuration for input audio transcription, defaults to off and can be set to
|
||||
* `null` to turn off once on. Input audio transcription is not native to the
|
||||
* model, since the model consumes audio directly. Transcription runs
|
||||
* asynchronously through
|
||||
* [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
|
||||
* and should be treated as guidance of input audio content rather than precisely
|
||||
* what the model heard. The client can optionally set the language and prompt for
|
||||
* transcription, these offer additional guidance to the transcription service.
|
||||
*/
|
||||
input_audio_transcription?: SessionCreateParams.InputAudioTranscription;
|
||||
/**
|
||||
* The default system instructions (i.e. system message) prepended to model calls.
|
||||
* This field allows the client to guide the model on desired responses. The model
|
||||
* can be instructed on response content and format, (e.g. "be extremely succinct",
|
||||
* "act friendly", "here are examples of good responses") and on audio behavior
|
||||
* (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
|
||||
* instructions are not guaranteed to be followed by the model, but they provide
|
||||
* guidance to the model on the desired behavior.
|
||||
*
|
||||
* Note that the server sets default instructions which will be used if this field
|
||||
* is not set and are visible in the `session.created` event at the start of the
|
||||
* session.
|
||||
*/
|
||||
instructions?: string;
|
||||
/**
|
||||
* Maximum number of output tokens for a single assistant response, inclusive of
|
||||
* tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
|
||||
* `inf` for the maximum available tokens for a given model. Defaults to `inf`.
|
||||
*/
|
||||
max_response_output_tokens?: number | 'inf';
|
||||
/**
|
||||
* The set of modalities the model can respond with. To disable audio, set this to
|
||||
* ["text"].
|
||||
*/
|
||||
modalities?: Array<'text' | 'audio'>;
|
||||
/**
|
||||
* The Realtime model used for this session.
|
||||
*/
|
||||
model?: 'gpt-4o-realtime-preview' | 'gpt-4o-realtime-preview-2024-10-01' | 'gpt-4o-realtime-preview-2024-12-17' | 'gpt-4o-realtime-preview-2025-06-03' | 'gpt-4o-mini-realtime-preview' | 'gpt-4o-mini-realtime-preview-2024-12-17';
|
||||
/**
|
||||
* The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
|
||||
* For `pcm16`, output audio is sampled at a rate of 24kHz.
|
||||
*/
|
||||
output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';
|
||||
/**
|
||||
* The speed of the model's spoken response. 1.0 is the default speed. 0.25 is the
|
||||
* minimum speed. 1.5 is the maximum speed. This value can only be changed in
|
||||
* between model turns, not while a response is in progress.
|
||||
*/
|
||||
speed?: number;
|
||||
/**
|
||||
* Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a
|
||||
* temperature of 0.8 is highly recommended for best performance.
|
||||
*/
|
||||
temperature?: number;
|
||||
/**
|
||||
* How the model chooses tools. Options are `auto`, `none`, `required`, or specify
|
||||
* a function.
|
||||
*/
|
||||
tool_choice?: string;
|
||||
/**
|
||||
* Tools (functions) available to the model.
|
||||
*/
|
||||
tools?: Array<SessionCreateParams.Tool>;
|
||||
/**
|
||||
* Configuration options for tracing. Set to null to disable tracing. Once tracing
|
||||
* is enabled for a session, the configuration cannot be modified.
|
||||
*
|
||||
* `auto` will create a trace for the session with default values for the workflow
|
||||
* name, group id, and metadata.
|
||||
*/
|
||||
tracing?: 'auto' | SessionCreateParams.TracingConfiguration;
|
||||
/**
|
||||
* Configuration for turn detection, either Server VAD or Semantic VAD. This can be
|
||||
* set to `null` to turn off, in which case the client must manually trigger model
|
||||
* response. Server VAD means that the model will detect the start and end of
|
||||
* speech based on audio volume and respond at the end of user speech. Semantic VAD
|
||||
* is more advanced and uses a turn detection model (in conjunction with VAD) to
|
||||
* semantically estimate whether the user has finished speaking, then dynamically
|
||||
* sets a timeout based on this probability. For example, if user audio trails off
|
||||
* with "uhhm", the model will score a low probability of turn end and wait longer
|
||||
* for the user to continue speaking. This can be useful for more natural
|
||||
* conversations, but may have a higher latency.
|
||||
*/
|
||||
turn_detection?: SessionCreateParams.TurnDetection;
|
||||
/**
|
||||
* The voice the model uses to respond. Voice cannot be changed during the session
|
||||
* once the model has responded with audio at least once. Current voice options are
|
||||
* `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
|
||||
* `shimmer`, and `verse`.
|
||||
*/
|
||||
voice?: (string & {}) | 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'fable' | 'onyx' | 'nova' | 'sage' | 'shimmer' | 'verse';
|
||||
}
|
||||
export declare namespace SessionCreateParams {
|
||||
/**
|
||||
* Configuration options for the generated client secret.
|
||||
*/
|
||||
interface ClientSecret {
|
||||
/**
|
||||
* Configuration for the ephemeral token expiration.
|
||||
*/
|
||||
expires_after?: ClientSecret.ExpiresAfter;
|
||||
}
|
||||
namespace ClientSecret {
|
||||
/**
|
||||
* Configuration for the ephemeral token expiration.
|
||||
*/
|
||||
interface ExpiresAfter {
|
||||
/**
|
||||
* The anchor point for the ephemeral token expiration. Only `created_at` is
|
||||
* currently supported.
|
||||
*/
|
||||
anchor: 'created_at';
|
||||
/**
|
||||
* The number of seconds from the anchor point to the expiration. Select a value
|
||||
* between `10` and `7200`.
|
||||
*/
|
||||
seconds?: number;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Configuration for input audio noise reduction. This can be set to `null` to turn
|
||||
* off. Noise reduction filters audio added to the input audio buffer before it is
|
||||
* sent to VAD and the model. Filtering the audio can improve VAD and turn
|
||||
* detection accuracy (reducing false positives) and model performance by improving
|
||||
* perception of the input audio.
|
||||
*/
|
||||
interface InputAudioNoiseReduction {
|
||||
/**
|
||||
* Type of noise reduction. `near_field` is for close-talking microphones such as
|
||||
* headphones, `far_field` is for far-field microphones such as laptop or
|
||||
* conference room microphones.
|
||||
*/
|
||||
type?: 'near_field' | 'far_field';
|
||||
}
|
||||
/**
|
||||
* Configuration for input audio transcription, defaults to off and can be set to
|
||||
* `null` to turn off once on. Input audio transcription is not native to the
|
||||
* model, since the model consumes audio directly. Transcription runs
|
||||
* asynchronously through
|
||||
* [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
|
||||
* and should be treated as guidance of input audio content rather than precisely
|
||||
* what the model heard. The client can optionally set the language and prompt for
|
||||
* transcription, these offer additional guidance to the transcription service.
|
||||
*/
|
||||
interface InputAudioTranscription {
|
||||
/**
|
||||
* The language of the input audio. Supplying the input language in
|
||||
* [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
|
||||
* format will improve accuracy and latency.
|
||||
*/
|
||||
language?: string;
|
||||
/**
|
||||
* The model to use for transcription, current options are `gpt-4o-transcribe`,
|
||||
* `gpt-4o-mini-transcribe`, and `whisper-1`.
|
||||
*/
|
||||
model?: string;
|
||||
/**
|
||||
* An optional text to guide the model's style or continue a previous audio
|
||||
* segment. For `whisper-1`, the
|
||||
* [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
|
||||
* For `gpt-4o-transcribe` models, the prompt is a free text string, for example
|
||||
* "expect words related to technology".
|
||||
*/
|
||||
prompt?: string;
|
||||
}
|
||||
interface Tool {
|
||||
/**
|
||||
* The description of the function, including guidance on when and how to call it,
|
||||
* and guidance about what to tell the user when calling (if anything).
|
||||
*/
|
||||
description?: string;
|
||||
/**
|
||||
* The name of the function.
|
||||
*/
|
||||
name?: string;
|
||||
/**
|
||||
* Parameters of the function in JSON Schema.
|
||||
*/
|
||||
parameters?: unknown;
|
||||
/**
|
||||
* The type of the tool, i.e. `function`.
|
||||
*/
|
||||
type?: 'function';
|
||||
}
|
||||
/**
|
||||
* Granular configuration for tracing.
|
||||
*/
|
||||
interface TracingConfiguration {
|
||||
/**
|
||||
* The group id to attach to this trace to enable filtering and grouping in the
|
||||
* traces dashboard.
|
||||
*/
|
||||
group_id?: string;
|
||||
/**
|
||||
* The arbitrary metadata to attach to this trace to enable filtering in the traces
|
||||
* dashboard.
|
||||
*/
|
||||
metadata?: unknown;
|
||||
/**
|
||||
* The name of the workflow to attach to this trace. This is used to name the trace
|
||||
* in the traces dashboard.
|
||||
*/
|
||||
workflow_name?: string;
|
||||
}
|
||||
/**
|
||||
* Configuration for turn detection, either Server VAD or Semantic VAD. This can be
|
||||
* set to `null` to turn off, in which case the client must manually trigger model
|
||||
* response. Server VAD means that the model will detect the start and end of
|
||||
* speech based on audio volume and respond at the end of user speech. Semantic VAD
|
||||
* is more advanced and uses a turn detection model (in conjunction with VAD) to
|
||||
* semantically estimate whether the user has finished speaking, then dynamically
|
||||
* sets a timeout based on this probability. For example, if user audio trails off
|
||||
* with "uhhm", the model will score a low probability of turn end and wait longer
|
||||
* for the user to continue speaking. This can be useful for more natural
|
||||
* conversations, but may have a higher latency.
|
||||
*/
|
||||
interface TurnDetection {
|
||||
/**
|
||||
* Whether or not to automatically generate a response when a VAD stop event
|
||||
* occurs.
|
||||
*/
|
||||
create_response?: boolean;
|
||||
/**
|
||||
* Used only for `semantic_vad` mode. The eagerness of the model to respond. `low`
|
||||
* will wait longer for the user to continue speaking, `high` will respond more
|
||||
* quickly. `auto` is the default and is equivalent to `medium`.
|
||||
*/
|
||||
eagerness?: 'low' | 'medium' | 'high' | 'auto';
|
||||
/**
|
||||
* Whether or not to automatically interrupt any ongoing response with output to
|
||||
* the default conversation (i.e. `conversation` of `auto`) when a VAD start event
|
||||
* occurs.
|
||||
*/
|
||||
interrupt_response?: boolean;
|
||||
/**
|
||||
* Used only for `server_vad` mode. Amount of audio to include before the VAD
|
||||
* detected speech (in milliseconds). Defaults to 300ms.
|
||||
*/
|
||||
prefix_padding_ms?: number;
|
||||
/**
|
||||
* Used only for `server_vad` mode. Duration of silence to detect speech stop (in
|
||||
* milliseconds). Defaults to 500ms. With shorter values the model will respond
|
||||
* more quickly, but may jump in on short pauses from the user.
|
||||
*/
|
||||
silence_duration_ms?: number;
|
||||
/**
|
||||
* Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this
|
||||
* defaults to 0.5. A higher threshold will require louder audio to activate the
|
||||
* model, and thus might perform better in noisy environments.
|
||||
*/
|
||||
threshold?: number;
|
||||
/**
|
||||
* Type of turn detection.
|
||||
*/
|
||||
type?: 'server_vad' | 'semantic_vad';
|
||||
}
|
||||
}
|
||||
export declare namespace Sessions {
|
||||
export { type Session as Session, type SessionCreateResponse as SessionCreateResponse, type SessionCreateParams as SessionCreateParams, };
|
||||
}
|
||||
//# sourceMappingURL=sessions.d.mts.map
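The Sessions.create declaration above documents minting an ephemeral Realtime token whose client_secret.value a browser can use instead of a standard API key. A minimal server-side sketch of that flow, assuming a Node environment with the openai package and OPENAI_API_KEY set; the model, modalities, and voice values are illustrative choices from the documented unions, not values taken from this diff:

import OpenAI from 'openai';

const client = new OpenAI(); // standard API key stays server-side

// Mint a short-lived token for a browser client (default TTL is about one minute).
async function createEphemeralRealtimeToken(): Promise<string> {
  const session = await client.beta.realtime.sessions.create({
    model: 'gpt-4o-realtime-preview', // illustrative pick from the documented model union
    modalities: ['text', 'audio'],
    voice: 'alloy',
  });
  // session.client_secret.expires_at reports when the token stops working.
  return session.client_secret.value; // hand only this value to the browser
}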
node_modules/openai/resources/beta/realtime/sessions.d.mts.map (+1 line, generated, vendored, new file)
File diff suppressed because one or more lines are too long
node_modules/openai/resources/beta/realtime/sessions.d.ts (+746 lines, generated, vendored, new file)
@@ -0,0 +1,746 @@
import { APIResource } from "../../../core/resource.js";
|
||||
import { APIPromise } from "../../../core/api-promise.js";
|
||||
import { RequestOptions } from "../../../internal/request-options.js";
|
||||
export declare class Sessions extends APIResource {
|
||||
/**
|
||||
* Create an ephemeral API token for use in client-side applications with the
|
||||
* Realtime API. Can be configured with the same session parameters as the
|
||||
* `session.update` client event.
|
||||
*
|
||||
* It responds with a session object, plus a `client_secret` key which contains a
|
||||
* usable ephemeral API token that can be used to authenticate browser clients for
|
||||
* the Realtime API.
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* const session =
|
||||
* await client.beta.realtime.sessions.create();
|
||||
* ```
|
||||
*/
|
||||
create(body: SessionCreateParams, options?: RequestOptions): APIPromise<SessionCreateResponse>;
|
||||
}
|
||||
/**
|
||||
* Realtime session object configuration.
|
||||
*/
|
||||
export interface Session {
|
||||
/**
|
||||
* Unique identifier for the session that looks like `sess_1234567890abcdef`.
|
||||
*/
|
||||
id?: string;
|
||||
/**
|
||||
* The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
|
||||
* `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
|
||||
* (mono), and little-endian byte order.
|
||||
*/
|
||||
input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';
|
||||
/**
|
||||
* Configuration for input audio noise reduction. This can be set to `null` to turn
|
||||
* off. Noise reduction filters audio added to the input audio buffer before it is
|
||||
* sent to VAD and the model. Filtering the audio can improve VAD and turn
|
||||
* detection accuracy (reducing false positives) and model performance by improving
|
||||
* perception of the input audio.
|
||||
*/
|
||||
input_audio_noise_reduction?: Session.InputAudioNoiseReduction;
|
||||
/**
|
||||
* Configuration for input audio transcription, defaults to off and can be set to
|
||||
* `null` to turn off once on. Input audio transcription is not native to the
|
||||
* model, since the model consumes audio directly. Transcription runs
|
||||
* asynchronously through
|
||||
* [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
|
||||
* and should be treated as guidance of input audio content rather than precisely
|
||||
* what the model heard. The client can optionally set the language and prompt for
|
||||
* transcription, these offer additional guidance to the transcription service.
|
||||
*/
|
||||
input_audio_transcription?: Session.InputAudioTranscription;
|
||||
/**
|
||||
* The default system instructions (i.e. system message) prepended to model calls.
|
||||
* This field allows the client to guide the model on desired responses. The model
|
||||
* can be instructed on response content and format, (e.g. "be extremely succinct",
|
||||
* "act friendly", "here are examples of good responses") and on audio behavior
|
||||
* (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
|
||||
* instructions are not guaranteed to be followed by the model, but they provide
|
||||
* guidance to the model on the desired behavior.
|
||||
*
|
||||
* Note that the server sets default instructions which will be used if this field
|
||||
* is not set and are visible in the `session.created` event at the start of the
|
||||
* session.
|
||||
*/
|
||||
instructions?: string;
|
||||
/**
|
||||
* Maximum number of output tokens for a single assistant response, inclusive of
|
||||
* tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
|
||||
* `inf` for the maximum available tokens for a given model. Defaults to `inf`.
|
||||
*/
|
||||
max_response_output_tokens?: number | 'inf';
|
||||
/**
|
||||
* The set of modalities the model can respond with. To disable audio, set this to
|
||||
* ["text"].
|
||||
*/
|
||||
modalities?: Array<'text' | 'audio'>;
|
||||
/**
|
||||
* The Realtime model used for this session.
|
||||
*/
|
||||
model?: 'gpt-4o-realtime-preview' | 'gpt-4o-realtime-preview-2024-10-01' | 'gpt-4o-realtime-preview-2024-12-17' | 'gpt-4o-realtime-preview-2025-06-03' | 'gpt-4o-mini-realtime-preview' | 'gpt-4o-mini-realtime-preview-2024-12-17';
|
||||
/**
|
||||
* The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
|
||||
* For `pcm16`, output audio is sampled at a rate of 24kHz.
|
||||
*/
|
||||
output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';
|
||||
/**
|
||||
* The speed of the model's spoken response. 1.0 is the default speed. 0.25 is the
|
||||
* minimum speed. 1.5 is the maximum speed. This value can only be changed in
|
||||
* between model turns, not while a response is in progress.
|
||||
*/
|
||||
speed?: number;
|
||||
/**
|
||||
* Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a
|
||||
* temperature of 0.8 is highly recommended for best performance.
|
||||
*/
|
||||
temperature?: number;
|
||||
/**
|
||||
* How the model chooses tools. Options are `auto`, `none`, `required`, or specify
|
||||
* a function.
|
||||
*/
|
||||
tool_choice?: string;
|
||||
/**
|
||||
* Tools (functions) available to the model.
|
||||
*/
|
||||
tools?: Array<Session.Tool>;
|
||||
/**
|
||||
* Configuration options for tracing. Set to null to disable tracing. Once tracing
|
||||
* is enabled for a session, the configuration cannot be modified.
|
||||
*
|
||||
* `auto` will create a trace for the session with default values for the workflow
|
||||
* name, group id, and metadata.
|
||||
*/
|
||||
tracing?: 'auto' | Session.TracingConfiguration;
|
||||
/**
|
||||
* Configuration for turn detection, either Server VAD or Semantic VAD. This can be
|
||||
* set to `null` to turn off, in which case the client must manually trigger model
|
||||
* response. Server VAD means that the model will detect the start and end of
|
||||
* speech based on audio volume and respond at the end of user speech. Semantic VAD
|
||||
* is more advanced and uses a turn detection model (in conjunction with VAD) to
|
||||
* semantically estimate whether the user has finished speaking, then dynamically
|
||||
* sets a timeout based on this probability. For example, if user audio trails off
|
||||
* with "uhhm", the model will score a low probability of turn end and wait longer
|
||||
* for the user to continue speaking. This can be useful for more natural
|
||||
* conversations, but may have a higher latency.
|
||||
*/
|
||||
turn_detection?: Session.TurnDetection;
|
||||
/**
|
||||
* The voice the model uses to respond. Voice cannot be changed during the session
|
||||
* once the model has responded with audio at least once. Current voice options are
|
||||
* `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
|
||||
* `shimmer`, and `verse`.
|
||||
*/
|
||||
voice?: (string & {}) | 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'fable' | 'onyx' | 'nova' | 'sage' | 'shimmer' | 'verse';
|
||||
}
|
||||
export declare namespace Session {
|
||||
/**
|
||||
* Configuration for input audio noise reduction. This can be set to `null` to turn
|
||||
* off. Noise reduction filters audio added to the input audio buffer before it is
|
||||
* sent to VAD and the model. Filtering the audio can improve VAD and turn
|
||||
* detection accuracy (reducing false positives) and model performance by improving
|
||||
* perception of the input audio.
|
||||
*/
|
||||
interface InputAudioNoiseReduction {
|
||||
/**
|
||||
* Type of noise reduction. `near_field` is for close-talking microphones such as
|
||||
* headphones, `far_field` is for far-field microphones such as laptop or
|
||||
* conference room microphones.
|
||||
*/
|
||||
type?: 'near_field' | 'far_field';
|
||||
}
|
||||
/**
|
||||
* Configuration for input audio transcription, defaults to off and can be set to
|
||||
* `null` to turn off once on. Input audio transcription is not native to the
|
||||
* model, since the model consumes audio directly. Transcription runs
|
||||
* asynchronously through
|
||||
* [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
|
||||
* and should be treated as guidance of input audio content rather than precisely
|
||||
* what the model heard. The client can optionally set the language and prompt for
|
||||
* transcription, these offer additional guidance to the transcription service.
|
||||
*/
|
||||
interface InputAudioTranscription {
|
||||
/**
|
||||
* The language of the input audio. Supplying the input language in
|
||||
* [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
|
||||
* format will improve accuracy and latency.
|
||||
*/
|
||||
language?: string;
|
||||
/**
|
||||
* The model to use for transcription, current options are `gpt-4o-transcribe`,
|
||||
* `gpt-4o-mini-transcribe`, and `whisper-1`.
|
||||
*/
|
||||
model?: string;
|
||||
/**
|
||||
* An optional text to guide the model's style or continue a previous audio
|
||||
* segment. For `whisper-1`, the
|
||||
* [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
|
||||
* For `gpt-4o-transcribe` models, the prompt is a free text string, for example
|
||||
* "expect words related to technology".
|
||||
*/
|
||||
prompt?: string;
|
||||
}
|
||||
interface Tool {
|
||||
/**
|
||||
* The description of the function, including guidance on when and how to call it,
|
||||
* and guidance about what to tell the user when calling (if anything).
|
||||
*/
|
||||
description?: string;
|
||||
/**
|
||||
* The name of the function.
|
||||
*/
|
||||
name?: string;
|
||||
/**
|
||||
* Parameters of the function in JSON Schema.
|
||||
*/
|
||||
parameters?: unknown;
|
||||
/**
|
||||
* The type of the tool, i.e. `function`.
|
||||
*/
|
||||
type?: 'function';
|
||||
}
|
||||
/**
|
||||
* Granular configuration for tracing.
|
||||
*/
|
||||
interface TracingConfiguration {
|
||||
/**
|
||||
* The group id to attach to this trace to enable filtering and grouping in the
|
||||
* traces dashboard.
|
||||
*/
|
||||
group_id?: string;
|
||||
/**
|
||||
* The arbitrary metadata to attach to this trace to enable filtering in the traces
|
||||
* dashboard.
|
||||
*/
|
||||
metadata?: unknown;
|
||||
/**
|
||||
* The name of the workflow to attach to this trace. This is used to name the trace
|
||||
* in the traces dashboard.
|
||||
*/
|
||||
workflow_name?: string;
|
||||
}
|
||||
/**
|
||||
* Configuration for turn detection, either Server VAD or Semantic VAD. This can be
|
||||
* set to `null` to turn off, in which case the client must manually trigger model
|
||||
* response. Server VAD means that the model will detect the start and end of
|
||||
* speech based on audio volume and respond at the end of user speech. Semantic VAD
|
||||
* is more advanced and uses a turn detection model (in conjunction with VAD) to
|
||||
* semantically estimate whether the user has finished speaking, then dynamically
|
||||
* sets a timeout based on this probability. For example, if user audio trails off
|
||||
* with "uhhm", the model will score a low probability of turn end and wait longer
|
||||
* for the user to continue speaking. This can be useful for more natural
|
||||
* conversations, but may have a higher latency.
|
||||
*/
|
||||
interface TurnDetection {
|
||||
/**
|
||||
* Whether or not to automatically generate a response when a VAD stop event
|
||||
* occurs.
|
||||
*/
|
||||
create_response?: boolean;
|
||||
/**
|
||||
* Used only for `semantic_vad` mode. The eagerness of the model to respond. `low`
|
||||
* will wait longer for the user to continue speaking, `high` will respond more
|
||||
* quickly. `auto` is the default and is equivalent to `medium`.
|
||||
*/
|
||||
eagerness?: 'low' | 'medium' | 'high' | 'auto';
|
||||
/**
|
||||
* Whether or not to automatically interrupt any ongoing response with output to
|
||||
* the default conversation (i.e. `conversation` of `auto`) when a VAD start event
|
||||
* occurs.
|
||||
*/
|
||||
interrupt_response?: boolean;
|
||||
/**
|
||||
* Used only for `server_vad` mode. Amount of audio to include before the VAD
|
||||
* detected speech (in milliseconds). Defaults to 300ms.
|
||||
*/
|
||||
prefix_padding_ms?: number;
|
||||
/**
|
||||
* Used only for `server_vad` mode. Duration of silence to detect speech stop (in
|
||||
* milliseconds). Defaults to 500ms. With shorter values the model will respond
|
||||
* more quickly, but may jump in on short pauses from the user.
|
||||
*/
|
||||
silence_duration_ms?: number;
|
||||
/**
|
||||
* Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this
|
||||
* defaults to 0.5. A higher threshold will require louder audio to activate the
|
||||
* model, and thus might perform better in noisy environments.
|
||||
*/
|
||||
threshold?: number;
|
||||
/**
|
||||
* Type of turn detection.
|
||||
*/
|
||||
type?: 'server_vad' | 'semantic_vad';
|
||||
}
|
||||
}
|
||||
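// Illustrative sketch (not part of the generated file): two plausible
// `turn_detection` configurations as described above -- classic Server VAD tuned
// via padding/silence/threshold, and Semantic VAD with an eagerness level.
// The specific values are arbitrary examples, not recommendations.
const serverVad = {
  type: 'server_vad' as const,
  prefix_padding_ms: 300,
  silence_duration_ms: 500,
  threshold: 0.5,
  create_response: true,
};
const semanticVad = {
  type: 'semantic_vad' as const,
  eagerness: 'low' as const, // wait longer before assuming the user is done speaking
  interrupt_response: true,
};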
/**
|
||||
* A new Realtime session configuration, with an ephemeral key. Default TTL for
|
||||
* keys is one minute.
|
||||
*/
|
||||
export interface SessionCreateResponse {
|
||||
/**
|
||||
* Ephemeral key returned by the API.
|
||||
*/
|
||||
client_secret: SessionCreateResponse.ClientSecret;
|
||||
/**
|
||||
* The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
|
||||
*/
|
||||
input_audio_format?: string;
|
||||
/**
|
||||
* Configuration for input audio transcription, defaults to off and can be set to
|
||||
* `null` to turn off once on. Input audio transcription is not native to the
|
||||
* model, since the model consumes audio directly. Transcription runs
|
||||
* asynchronously and should be treated as rough guidance rather than the
|
||||
* representation understood by the model.
|
||||
*/
|
||||
input_audio_transcription?: SessionCreateResponse.InputAudioTranscription;
|
||||
/**
|
||||
* The default system instructions (i.e. system message) prepended to model calls.
|
||||
* This field allows the client to guide the model on desired responses. The model
|
||||
* can be instructed on response content and format, (e.g. "be extremely succinct",
|
||||
* "act friendly", "here are examples of good responses") and on audio behavior
|
||||
* (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
|
||||
* instructions are not guaranteed to be followed by the model, but they provide
|
||||
* guidance to the model on the desired behavior.
|
||||
*
|
||||
* Note that the server sets default instructions which will be used if this field
|
||||
* is not set and are visible in the `session.created` event at the start of the
|
||||
* session.
|
||||
*/
|
||||
instructions?: string;
|
||||
/**
|
||||
* Maximum number of output tokens for a single assistant response, inclusive of
|
||||
* tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
|
||||
* `inf` for the maximum available tokens for a given model. Defaults to `inf`.
|
||||
*/
|
||||
max_response_output_tokens?: number | 'inf';
|
||||
/**
|
||||
* The set of modalities the model can respond with. To disable audio, set this to
|
||||
* ["text"].
|
||||
*/
|
||||
modalities?: Array<'text' | 'audio'>;
|
||||
/**
|
||||
* The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
|
||||
*/
|
||||
output_audio_format?: string;
|
||||
/**
|
||||
* The speed of the model's spoken response. 1.0 is the default speed. 0.25 is the
|
||||
* minimum speed. 1.5 is the maximum speed. This value can only be changed in
|
||||
* between model turns, not while a response is in progress.
|
||||
*/
|
||||
speed?: number;
|
||||
/**
|
||||
* Sampling temperature for the model, limited to [0.6, 1.2]. Defaults to 0.8.
|
||||
*/
|
||||
temperature?: number;
|
||||
/**
|
||||
* How the model chooses tools. Options are `auto`, `none`, `required`, or specify
|
||||
* a function.
|
||||
*/
|
||||
tool_choice?: string;
|
||||
/**
|
||||
* Tools (functions) available to the model.
|
||||
*/
|
||||
tools?: Array<SessionCreateResponse.Tool>;
|
||||
/**
|
||||
* Configuration options for tracing. Set to null to disable tracing. Once tracing
|
||||
* is enabled for a session, the configuration cannot be modified.
|
||||
*
|
||||
* `auto` will create a trace for the session with default values for the workflow
|
||||
* name, group id, and metadata.
|
||||
*/
|
||||
tracing?: 'auto' | SessionCreateResponse.TracingConfiguration;
|
||||
/**
|
||||
* Configuration for turn detection. Can be set to `null` to turn off. Server VAD
|
||||
* means that the model will detect the start and end of speech based on audio
|
||||
* volume and respond at the end of user speech.
|
||||
*/
|
||||
turn_detection?: SessionCreateResponse.TurnDetection;
|
||||
/**
|
||||
* The voice the model uses to respond. Voice cannot be changed during the session
|
||||
* once the model has responded with audio at least once. Current voice options are
|
||||
* `alloy`, `ash`, `ballad`, `coral`, `echo`, `sage`, `shimmer`, and `verse`.
|
||||
*/
|
||||
voice?: (string & {}) | 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'fable' | 'onyx' | 'nova' | 'sage' | 'shimmer' | 'verse';
|
||||
}
|
||||
export declare namespace SessionCreateResponse {
|
||||
/**
|
||||
* Ephemeral key returned by the API.
|
||||
*/
|
||||
interface ClientSecret {
|
||||
/**
|
||||
* Timestamp for when the token expires. Currently, all tokens expire after one
|
||||
* minute.
|
||||
*/
|
||||
expires_at: number;
|
||||
/**
|
||||
* Ephemeral key usable in client environments to authenticate connections to the
|
||||
* Realtime API. Use this in client-side environments rather than a standard API
|
||||
* token, which should only be used server-side.
|
||||
*/
|
||||
value: string;
|
||||
}
|
||||
/**
|
||||
* Configuration for input audio transcription, defaults to off and can be set to
|
||||
* `null` to turn off once on. Input audio transcription is not native to the
|
||||
* model, since the model consumes audio directly. Transcription runs
|
||||
* asynchronously and should be treated as rough guidance rather than the
|
||||
* representation understood by the model.
|
||||
*/
|
||||
interface InputAudioTranscription {
|
||||
/**
|
||||
* The model to use for transcription.
|
||||
*/
|
||||
model?: string;
|
||||
}
|
||||
interface Tool {
|
||||
/**
|
||||
* The description of the function, including guidance on when and how to call it,
|
||||
* and guidance about what to tell the user when calling (if anything).
|
||||
*/
|
||||
description?: string;
|
||||
/**
|
||||
* The name of the function.
|
||||
*/
|
||||
name?: string;
|
||||
/**
|
||||
* Parameters of the function in JSON Schema.
|
||||
*/
|
||||
parameters?: unknown;
|
||||
/**
|
||||
* The type of the tool, i.e. `function`.
|
||||
*/
|
||||
type?: 'function';
|
||||
}
|
||||
/**
|
||||
* Granular configuration for tracing.
|
||||
*/
|
||||
interface TracingConfiguration {
|
||||
/**
|
||||
* The group id to attach to this trace to enable filtering and grouping in the
|
||||
* traces dashboard.
|
||||
*/
|
||||
group_id?: string;
|
||||
/**
|
||||
* The arbitrary metadata to attach to this trace to enable filtering in the traces
|
||||
* dashboard.
|
||||
*/
|
||||
metadata?: unknown;
|
||||
/**
|
||||
* The name of the workflow to attach to this trace. This is used to name the trace
|
||||
* in the traces dashboard.
|
||||
*/
|
||||
workflow_name?: string;
|
||||
}
|
||||
/**
|
||||
* Configuration for turn detection. Can be set to `null` to turn off. Server VAD
|
||||
* means that the model will detect the start and end of speech based on audio
|
||||
* volume and respond at the end of user speech.
|
||||
*/
|
||||
interface TurnDetection {
|
||||
/**
|
||||
* Amount of audio to include before the VAD detected speech (in milliseconds).
|
||||
* Defaults to 300ms.
|
||||
*/
|
||||
prefix_padding_ms?: number;
|
||||
/**
|
||||
* Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
|
||||
* With shorter values the model will respond more quickly, but may jump in on
|
||||
* short pauses from the user.
|
||||
*/
|
||||
silence_duration_ms?: number;
|
||||
/**
|
||||
* Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher
|
||||
* threshold will require louder audio to activate the model, and thus might
|
||||
* perform better in noisy environments.
|
||||
*/
|
||||
threshold?: number;
|
||||
/**
|
||||
* Type of turn detection, only `server_vad` is currently supported.
|
||||
*/
|
||||
type?: string;
|
||||
}
|
||||
}
|
||||
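// Illustrative sketch (not part of the generated file): how a server might hand the
// ephemeral `client_secret` from a SessionCreateResponse to a browser, as the
// ClientSecret docs above suggest. `sendToBrowser` is a hypothetical helper, and the
// check assumes `expires_at` is a Unix timestamp in seconds.
import OpenAI from 'openai';

async function mintRealtimeKey(sendToBrowser: (key: string) => void) {
  const client = new OpenAI(); // server-side; reads OPENAI_API_KEY from the environment
  const session = await client.beta.realtime.sessions.create({
    model: 'gpt-4o-realtime-preview',
  });
  // The ephemeral key is short-lived, so hand it to the client promptly.
  if (session.client_secret.expires_at * 1000 > Date.now()) {
    sendToBrowser(session.client_secret.value);
  }
}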
export interface SessionCreateParams {
|
||||
/**
|
||||
* Configuration options for the generated client secret.
|
||||
*/
|
||||
client_secret?: SessionCreateParams.ClientSecret;
|
||||
/**
|
||||
* The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
|
||||
* `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
|
||||
* (mono), and little-endian byte order.
|
||||
*/
|
||||
input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';
|
||||
/**
|
||||
* Configuration for input audio noise reduction. This can be set to `null` to turn
|
||||
* off. Noise reduction filters audio added to the input audio buffer before it is
|
||||
* sent to VAD and the model. Filtering the audio can improve VAD and turn
|
||||
* detection accuracy (reducing false positives) and model performance by improving
|
||||
* perception of the input audio.
|
||||
*/
|
||||
input_audio_noise_reduction?: SessionCreateParams.InputAudioNoiseReduction;
|
||||
/**
|
||||
* Configuration for input audio transcription, defaults to off and can be set to
|
||||
* `null` to turn off once on. Input audio transcription is not native to the
|
||||
* model, since the model consumes audio directly. Transcription runs
|
||||
* asynchronously through
|
||||
* [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
|
||||
* and should be treated as guidance of input audio content rather than precisely
|
||||
* what the model heard. The client can optionally set the language and prompt for
|
||||
* transcription, these offer additional guidance to the transcription service.
|
||||
*/
|
||||
input_audio_transcription?: SessionCreateParams.InputAudioTranscription;
|
||||
/**
|
||||
* The default system instructions (i.e. system message) prepended to model calls.
|
||||
* This field allows the client to guide the model on desired responses. The model
|
||||
* can be instructed on response content and format, (e.g. "be extremely succinct",
|
||||
* "act friendly", "here are examples of good responses") and on audio behavior
|
||||
* (e.g. "talk quickly", "inject emotion into your voice", "laugh frequently"). The
|
||||
* instructions are not guaranteed to be followed by the model, but they provide
|
||||
* guidance to the model on the desired behavior.
|
||||
*
|
||||
* Note that the server sets default instructions which will be used if this field
|
||||
* is not set and are visible in the `session.created` event at the start of the
|
||||
* session.
|
||||
*/
|
||||
instructions?: string;
|
||||
/**
|
||||
* Maximum number of output tokens for a single assistant response, inclusive of
|
||||
* tool calls. Provide an integer between 1 and 4096 to limit output tokens, or
|
||||
* `inf` for the maximum available tokens for a given model. Defaults to `inf`.
|
||||
*/
|
||||
max_response_output_tokens?: number | 'inf';
|
||||
/**
|
||||
* The set of modalities the model can respond with. To disable audio, set this to
|
||||
* ["text"].
|
||||
*/
|
||||
modalities?: Array<'text' | 'audio'>;
|
||||
/**
|
||||
* The Realtime model used for this session.
|
||||
*/
|
||||
model?: 'gpt-4o-realtime-preview' | 'gpt-4o-realtime-preview-2024-10-01' | 'gpt-4o-realtime-preview-2024-12-17' | 'gpt-4o-realtime-preview-2025-06-03' | 'gpt-4o-mini-realtime-preview' | 'gpt-4o-mini-realtime-preview-2024-12-17';
|
||||
/**
|
||||
* The format of output audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
|
||||
* For `pcm16`, output audio is sampled at a rate of 24kHz.
|
||||
*/
|
||||
output_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';
|
||||
/**
|
||||
* The speed of the model's spoken response. 1.0 is the default speed. 0.25 is the
|
||||
* minimum speed. 1.5 is the maximum speed. This value can only be changed in
|
||||
* between model turns, not while a response is in progress.
|
||||
*/
|
||||
speed?: number;
|
||||
/**
|
||||
* Sampling temperature for the model, limited to [0.6, 1.2]. For audio models a
|
||||
* temperature of 0.8 is highly recommended for best performance.
|
||||
*/
|
||||
temperature?: number;
|
||||
/**
|
||||
* How the model chooses tools. Options are `auto`, `none`, `required`, or specify
|
||||
* a function.
|
||||
*/
|
||||
tool_choice?: string;
|
||||
/**
|
||||
* Tools (functions) available to the model.
|
||||
*/
|
||||
tools?: Array<SessionCreateParams.Tool>;
|
||||
/**
|
||||
* Configuration options for tracing. Set to null to disable tracing. Once tracing
|
||||
* is enabled for a session, the configuration cannot be modified.
|
||||
*
|
||||
* `auto` will create a trace for the session with default values for the workflow
|
||||
* name, group id, and metadata.
|
||||
*/
|
||||
tracing?: 'auto' | SessionCreateParams.TracingConfiguration;
|
||||
/**
|
||||
* Configuration for turn detection, either Server VAD or Semantic VAD. This can be
|
||||
* set to `null` to turn off, in which case the client must manually trigger model
|
||||
* response. Server VAD means that the model will detect the start and end of
|
||||
* speech based on audio volume and respond at the end of user speech. Semantic VAD
|
||||
* is more advanced and uses a turn detection model (in conjunction with VAD) to
|
||||
* semantically estimate whether the user has finished speaking, then dynamically
|
||||
* sets a timeout based on this probability. For example, if user audio trails off
|
||||
* with "uhhm", the model will score a low probability of turn end and wait longer
|
||||
* for the user to continue speaking. This can be useful for more natural
|
||||
* conversations, but may have a higher latency.
|
||||
*/
|
||||
turn_detection?: SessionCreateParams.TurnDetection;
|
||||
/**
|
||||
* The voice the model uses to respond. Voice cannot be changed during the session
|
||||
* once the model has responded with audio at least once. Current voice options are
|
||||
* `alloy`, `ash`, `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`,
|
||||
* `shimmer`, and `verse`.
|
||||
*/
|
||||
voice?: (string & {}) | 'alloy' | 'ash' | 'ballad' | 'coral' | 'echo' | 'fable' | 'onyx' | 'nova' | 'sage' | 'shimmer' | 'verse';
|
||||
}
|
||||
export declare namespace SessionCreateParams {
|
||||
/**
|
||||
* Configuration options for the generated client secret.
|
||||
*/
|
||||
interface ClientSecret {
|
||||
/**
|
||||
* Configuration for the ephemeral token expiration.
|
||||
*/
|
||||
expires_after?: ClientSecret.ExpiresAfter;
|
||||
}
|
||||
namespace ClientSecret {
|
||||
/**
|
||||
* Configuration for the ephemeral token expiration.
|
||||
*/
|
||||
interface ExpiresAfter {
|
||||
/**
|
||||
* The anchor point for the ephemeral token expiration. Only `created_at` is
|
||||
* currently supported.
|
||||
*/
|
||||
anchor: 'created_at';
|
||||
/**
|
||||
* The number of seconds from the anchor point to the expiration. Select a value
|
||||
* between `10` and `7200`.
|
||||
*/
|
||||
seconds?: number;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Configuration for input audio noise reduction. This can be set to `null` to turn
|
||||
* off. Noise reduction filters audio added to the input audio buffer before it is
|
||||
* sent to VAD and the model. Filtering the audio can improve VAD and turn
|
||||
* detection accuracy (reducing false positives) and model performance by improving
|
||||
* perception of the input audio.
|
||||
*/
|
||||
interface InputAudioNoiseReduction {
|
||||
/**
|
||||
* Type of noise reduction. `near_field` is for close-talking microphones such as
|
||||
* headphones, `far_field` is for far-field microphones such as laptop or
|
||||
* conference room microphones.
|
||||
*/
|
||||
type?: 'near_field' | 'far_field';
|
||||
}
|
||||
/**
|
||||
* Configuration for input audio transcription, defaults to off and can be set to
|
||||
* `null` to turn off once on. Input audio transcription is not native to the
|
||||
* model, since the model consumes audio directly. Transcription runs
|
||||
* asynchronously through
|
||||
* [the /audio/transcriptions endpoint](https://platform.openai.com/docs/api-reference/audio/createTranscription)
|
||||
* and should be treated as guidance of input audio content rather than precisely
|
||||
* what the model heard. The client can optionally set the language and prompt for
|
||||
* transcription, these offer additional guidance to the transcription service.
|
||||
*/
|
||||
interface InputAudioTranscription {
|
||||
/**
|
||||
* The language of the input audio. Supplying the input language in
|
||||
* [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
|
||||
* format will improve accuracy and latency.
|
||||
*/
|
||||
language?: string;
|
||||
/**
|
||||
* The model to use for transcription, current options are `gpt-4o-transcribe`,
|
||||
* `gpt-4o-mini-transcribe`, and `whisper-1`.
|
||||
*/
|
||||
model?: string;
|
||||
/**
|
||||
* An optional text to guide the model's style or continue a previous audio
|
||||
* segment. For `whisper-1`, the
|
||||
* [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
|
||||
* For `gpt-4o-transcribe` models, the prompt is a free text string, for example
|
||||
* "expect words related to technology".
|
||||
*/
|
||||
prompt?: string;
|
||||
}
|
||||
interface Tool {
|
||||
/**
|
||||
* The description of the function, including guidance on when and how to call it,
|
||||
* and guidance about what to tell the user when calling (if anything).
|
||||
*/
|
||||
description?: string;
|
||||
/**
|
||||
* The name of the function.
|
||||
*/
|
||||
name?: string;
|
||||
/**
|
||||
* Parameters of the function in JSON Schema.
|
||||
*/
|
||||
parameters?: unknown;
|
||||
/**
|
||||
* The type of the tool, i.e. `function`.
|
||||
*/
|
||||
type?: 'function';
|
||||
}
|
||||
/**
|
||||
* Granular configuration for tracing.
|
||||
*/
|
||||
interface TracingConfiguration {
|
||||
/**
|
||||
* The group id to attach to this trace to enable filtering and grouping in the
|
||||
* traces dashboard.
|
||||
*/
|
||||
group_id?: string;
|
||||
/**
|
||||
* The arbitrary metadata to attach to this trace to enable filtering in the traces
|
||||
* dashboard.
|
||||
*/
|
||||
metadata?: unknown;
|
||||
/**
|
||||
* The name of the workflow to attach to this trace. This is used to name the trace
|
||||
* in the traces dashboard.
|
||||
*/
|
||||
workflow_name?: string;
|
||||
}
|
||||
/**
|
||||
* Configuration for turn detection, either Server VAD or Semantic VAD. This can be
|
||||
* set to `null` to turn off, in which case the client must manually trigger model
|
||||
* response. Server VAD means that the model will detect the start and end of
|
||||
* speech based on audio volume and respond at the end of user speech. Semantic VAD
|
||||
* is more advanced and uses a turn detection model (in conjunction with VAD) to
|
||||
* semantically estimate whether the user has finished speaking, then dynamically
|
||||
* sets a timeout based on this probability. For example, if user audio trails off
|
||||
* with "uhhm", the model will score a low probability of turn end and wait longer
|
||||
* for the user to continue speaking. This can be useful for more natural
|
||||
* conversations, but may have a higher latency.
|
||||
*/
|
||||
interface TurnDetection {
|
||||
/**
|
||||
* Whether or not to automatically generate a response when a VAD stop event
|
||||
* occurs.
|
||||
*/
|
||||
create_response?: boolean;
|
||||
/**
|
||||
* Used only for `semantic_vad` mode. The eagerness of the model to respond. `low`
|
||||
* will wait longer for the user to continue speaking, `high` will respond more
|
||||
* quickly. `auto` is the default and is equivalent to `medium`.
|
||||
*/
|
||||
eagerness?: 'low' | 'medium' | 'high' | 'auto';
|
||||
/**
|
||||
* Whether or not to automatically interrupt any ongoing response with output to
|
||||
* the default conversation (i.e. `conversation` of `auto`) when a VAD start event
|
||||
* occurs.
|
||||
*/
|
||||
interrupt_response?: boolean;
|
||||
/**
|
||||
* Used only for `server_vad` mode. Amount of audio to include before the VAD
|
||||
* detected speech (in milliseconds). Defaults to 300ms.
|
||||
*/
|
||||
prefix_padding_ms?: number;
|
||||
/**
|
||||
* Used only for `server_vad` mode. Duration of silence to detect speech stop (in
|
||||
* milliseconds). Defaults to 500ms. With shorter values the model will respond
|
||||
* more quickly, but may jump in on short pauses from the user.
|
||||
*/
|
||||
silence_duration_ms?: number;
|
||||
/**
|
||||
* Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this
|
||||
* defaults to 0.5. A higher threshold will require louder audio to activate the
|
||||
* model, and thus might perform better in noisy environments.
|
||||
*/
|
||||
threshold?: number;
|
||||
/**
|
||||
* Type of turn detection.
|
||||
*/
|
||||
type?: 'server_vad' | 'semantic_vad';
|
||||
}
|
||||
}
|
||||
export declare namespace Sessions {
|
||||
export { type Session as Session, type SessionCreateResponse as SessionCreateResponse, type SessionCreateParams as SessionCreateParams, };
|
||||
}
|
||||
//# sourceMappingURL=sessions.d.ts.map
|
||||
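// Illustrative sketch (not part of the generated file): a fuller SessionCreateParams
// object combining the options documented above (noise reduction, input transcription,
// semantic VAD, ephemeral-token expiry). Values are arbitrary examples, and the import
// path assumes the package's subpath exports.
import type { SessionCreateParams } from 'openai/resources/beta/realtime/sessions';

const params: SessionCreateParams = {
  model: 'gpt-4o-mini-realtime-preview',
  modalities: ['text', 'audio'],
  voice: 'verse',
  input_audio_format: 'pcm16',
  input_audio_noise_reduction: { type: 'near_field' },
  input_audio_transcription: { model: 'gpt-4o-transcribe', language: 'en' },
  turn_detection: { type: 'semantic_vad', eagerness: 'auto' },
  // Ephemeral token lifetime, anchored to creation time (10-7200 seconds allowed).
  client_secret: { expires_after: { anchor: 'created_at', seconds: 600 } },
};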
1
node_modules/openai/resources/beta/realtime/sessions.d.ts.map
generated
vendored
Normal file
1
node_modules/openai/resources/beta/realtime/sessions.d.ts.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
32
node_modules/openai/resources/beta/realtime/sessions.js
generated
vendored
Normal file
32
node_modules/openai/resources/beta/realtime/sessions.js
generated
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
"use strict";
|
||||
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.Sessions = void 0;
|
||||
const resource_1 = require("../../../core/resource.js");
|
||||
const headers_1 = require("../../../internal/headers.js");
|
||||
class Sessions extends resource_1.APIResource {
|
||||
/**
|
||||
* Create an ephemeral API token for use in client-side applications with the
|
||||
* Realtime API. Can be configured with the same session parameters as the
|
||||
* `session.update` client event.
|
||||
*
|
||||
* It responds with a session object, plus a `client_secret` key which contains a
|
||||
* usable ephemeral API token that can be used to authenticate browser clients for
|
||||
* the Realtime API.
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* const session =
|
||||
* await client.beta.realtime.sessions.create();
|
||||
* ```
|
||||
*/
|
||||
create(body, options) {
|
||||
return this._client.post('/realtime/sessions', {
|
||||
body,
|
||||
...options,
|
||||
headers: (0, headers_1.buildHeaders)([{ 'OpenAI-Beta': 'assistants=v2' }, options?.headers]),
|
||||
});
|
||||
}
|
||||
}
|
||||
exports.Sessions = Sessions;
|
||||
//# sourceMappingURL=sessions.js.map
|
||||
1
node_modules/openai/resources/beta/realtime/sessions.js.map
generated
vendored
Normal file
1
node_modules/openai/resources/beta/realtime/sessions.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"sessions.js","sourceRoot":"","sources":["../../../src/resources/beta/realtime/sessions.ts"],"names":[],"mappings":";AAAA,sFAAsF;;;AAEtF,wDAAqD;AAErD,0DAAyD;AAGzD,MAAa,QAAS,SAAQ,sBAAW;IACvC;;;;;;;;;;;;;;OAcG;IACH,MAAM,CAAC,IAAyB,EAAE,OAAwB;QACxD,OAAO,IAAI,CAAC,OAAO,CAAC,IAAI,CAAC,oBAAoB,EAAE;YAC7C,IAAI;YACJ,GAAG,OAAO;YACV,OAAO,EAAE,IAAA,sBAAY,EAAC,CAAC,EAAE,aAAa,EAAE,eAAe,EAAE,EAAE,OAAO,EAAE,OAAO,CAAC,CAAC;SAC9E,CAAC,CAAC;IACL,CAAC;CACF;AAvBD,4BAuBC"}
|
||||
28
node_modules/openai/resources/beta/realtime/sessions.mjs
generated
vendored
Normal file
28
node_modules/openai/resources/beta/realtime/sessions.mjs
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
import { APIResource } from "../../../core/resource.mjs";
|
||||
import { buildHeaders } from "../../../internal/headers.mjs";
|
||||
export class Sessions extends APIResource {
|
||||
/**
|
||||
* Create an ephemeral API token for use in client-side applications with the
|
||||
* Realtime API. Can be configured with the same session parameters as the
|
||||
* `session.update` client event.
|
||||
*
|
||||
* It responds with a session object, plus a `client_secret` key which contains a
|
||||
* usable ephemeral API token that can be used to authenticate browser clients for
|
||||
* the Realtime API.
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* const session =
|
||||
* await client.beta.realtime.sessions.create();
|
||||
* ```
|
||||
*/
|
||||
create(body, options) {
|
||||
return this._client.post('/realtime/sessions', {
|
||||
body,
|
||||
...options,
|
||||
headers: buildHeaders([{ 'OpenAI-Beta': 'assistants=v2' }, options?.headers]),
|
||||
});
|
||||
}
|
||||
}
|
||||
//# sourceMappingURL=sessions.mjs.map
|
||||
1
node_modules/openai/resources/beta/realtime/sessions.mjs.map
generated
vendored
Normal file
1
node_modules/openai/resources/beta/realtime/sessions.mjs.map
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"sessions.mjs","sourceRoot":"","sources":["../../../src/resources/beta/realtime/sessions.ts"],"names":[],"mappings":"AAAA,sFAAsF;OAE/E,EAAE,WAAW,EAAE;OAEf,EAAE,YAAY,EAAE;AAGvB,MAAM,OAAO,QAAS,SAAQ,WAAW;IACvC;;;;;;;;;;;;;;OAcG;IACH,MAAM,CAAC,IAAyB,EAAE,OAAwB;QACxD,OAAO,IAAI,CAAC,OAAO,CAAC,IAAI,CAAC,oBAAoB,EAAE;YAC7C,IAAI;YACJ,GAAG,OAAO;YACV,OAAO,EAAE,YAAY,CAAC,CAAC,EAAE,aAAa,EAAE,eAAe,EAAE,EAAE,OAAO,EAAE,OAAO,CAAC,CAAC;SAC9E,CAAC,CAAC;IACL,CAAC;CACF"}
|
||||
299
node_modules/openai/resources/beta/realtime/transcription-sessions.d.mts
generated
vendored
Normal file
299
node_modules/openai/resources/beta/realtime/transcription-sessions.d.mts
generated
vendored
Normal file
@@ -0,0 +1,299 @@
|
||||
import { APIResource } from "../../../core/resource.mjs";
|
||||
import { APIPromise } from "../../../core/api-promise.mjs";
|
||||
import { RequestOptions } from "../../../internal/request-options.mjs";
|
||||
export declare class TranscriptionSessions extends APIResource {
|
||||
/**
|
||||
* Create an ephemeral API token for use in client-side applications with the
|
||||
* Realtime API specifically for realtime transcriptions. Can be configured with
|
||||
* the same session parameters as the `transcription_session.update` client event.
|
||||
*
|
||||
* It responds with a session object, plus a `client_secret` key which contains a
|
||||
* usable ephemeral API token that can be used to authenticate browser clients for
|
||||
* the Realtime API.
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* const transcriptionSession =
|
||||
* await client.beta.realtime.transcriptionSessions.create();
|
||||
* ```
|
||||
*/
|
||||
create(body: TranscriptionSessionCreateParams, options?: RequestOptions): APIPromise<TranscriptionSession>;
|
||||
}
|
||||
/**
|
||||
* A new Realtime transcription session configuration.
|
||||
*
|
||||
* When a session is created on the server via REST API, the session object also
|
||||
* contains an ephemeral key. Default TTL for keys is 10 minutes. This property is
|
||||
* not present when a session is updated via the WebSocket API.
|
||||
*/
|
||||
export interface TranscriptionSession {
|
||||
/**
|
||||
* Ephemeral key returned by the API. Only present when the session is created on
|
||||
* the server via REST API.
|
||||
*/
|
||||
client_secret: TranscriptionSession.ClientSecret;
|
||||
/**
|
||||
* The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
|
||||
*/
|
||||
input_audio_format?: string;
|
||||
/**
|
||||
* Configuration of the transcription model.
|
||||
*/
|
||||
input_audio_transcription?: TranscriptionSession.InputAudioTranscription;
|
||||
/**
|
||||
* The set of modalities the model can respond with. To disable audio, set this to
|
||||
* ["text"].
|
||||
*/
|
||||
modalities?: Array<'text' | 'audio'>;
|
||||
/**
|
||||
* Configuration for turn detection. Can be set to `null` to turn off. Server VAD
|
||||
* means that the model will detect the start and end of speech based on audio
|
||||
* volume and respond at the end of user speech.
|
||||
*/
|
||||
turn_detection?: TranscriptionSession.TurnDetection;
|
||||
}
|
||||
export declare namespace TranscriptionSession {
|
||||
/**
|
||||
* Ephemeral key returned by the API. Only present when the session is created on
|
||||
* the server via REST API.
|
||||
*/
|
||||
interface ClientSecret {
|
||||
/**
|
||||
* Timestamp for when the token expires. Currently, all tokens expire after one
|
||||
* minute.
|
||||
*/
|
||||
expires_at: number;
|
||||
/**
|
||||
* Ephemeral key usable in client environments to authenticate connections to the
|
||||
* Realtime API. Use this in client-side environments rather than a standard API
|
||||
* token, which should only be used server-side.
|
||||
*/
|
||||
value: string;
|
||||
}
|
||||
/**
|
||||
* Configuration of the transcription model.
|
||||
*/
|
||||
interface InputAudioTranscription {
|
||||
/**
|
||||
* The language of the input audio. Supplying the input language in
|
||||
* [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
|
||||
* format will improve accuracy and latency.
|
||||
*/
|
||||
language?: string;
|
||||
/**
|
||||
* The model to use for transcription. Can be `gpt-4o-transcribe`,
|
||||
* `gpt-4o-mini-transcribe`, or `whisper-1`.
|
||||
*/
|
||||
model?: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe' | 'whisper-1';
|
||||
/**
|
||||
* An optional text to guide the model's style or continue a previous audio
|
||||
* segment. The
|
||||
* [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
|
||||
* should match the audio language.
|
||||
*/
|
||||
prompt?: string;
|
||||
}
|
||||
/**
|
||||
* Configuration for turn detection. Can be set to `null` to turn off. Server VAD
|
||||
* means that the model will detect the start and end of speech based on audio
|
||||
* volume and respond at the end of user speech.
|
||||
*/
|
||||
interface TurnDetection {
|
||||
/**
|
||||
* Amount of audio to include before the VAD detected speech (in milliseconds).
|
||||
* Defaults to 300ms.
|
||||
*/
|
||||
prefix_padding_ms?: number;
|
||||
/**
|
||||
* Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
|
||||
* With shorter values the model will respond more quickly, but may jump in on
|
||||
* short pauses from the user.
|
||||
*/
|
||||
silence_duration_ms?: number;
|
||||
/**
|
||||
* Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher
|
||||
* threshold will require louder audio to activate the model, and thus might
|
||||
* perform better in noisy environments.
|
||||
*/
|
||||
threshold?: number;
|
||||
/**
|
||||
* Type of turn detection, only `server_vad` is currently supported.
|
||||
*/
|
||||
type?: string;
|
||||
}
|
||||
}
|
||||
export interface TranscriptionSessionCreateParams {
|
||||
/**
|
||||
* Configuration options for the generated client secret.
|
||||
*/
|
||||
client_secret?: TranscriptionSessionCreateParams.ClientSecret;
|
||||
/**
|
||||
* The set of items to include in the transcription. Current available items are:
|
||||
*
|
||||
* - `item.input_audio_transcription.logprobs`
|
||||
*/
|
||||
include?: Array<string>;
|
||||
/**
|
||||
* The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
|
||||
* `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
|
||||
* (mono), and little-endian byte order.
|
||||
*/
|
||||
input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';
|
||||
/**
|
||||
* Configuration for input audio noise reduction. This can be set to `null` to turn
|
||||
* off. Noise reduction filters audio added to the input audio buffer before it is
|
||||
* sent to VAD and the model. Filtering the audio can improve VAD and turn
|
||||
* detection accuracy (reducing false positives) and model performance by improving
|
||||
* perception of the input audio.
|
||||
*/
|
||||
input_audio_noise_reduction?: TranscriptionSessionCreateParams.InputAudioNoiseReduction;
|
||||
/**
|
||||
* Configuration for input audio transcription. The client can optionally set the
|
||||
* language and prompt for transcription, these offer additional guidance to the
|
||||
* transcription service.
|
||||
*/
|
||||
input_audio_transcription?: TranscriptionSessionCreateParams.InputAudioTranscription;
|
||||
/**
|
||||
* The set of modalities the model can respond with. To disable audio, set this to
|
||||
* ["text"].
|
||||
*/
|
||||
modalities?: Array<'text' | 'audio'>;
|
||||
/**
|
||||
* Configuration for turn detection, either Server VAD or Semantic VAD. This can be
|
||||
* set to `null` to turn off, in which case the client must manually trigger model
|
||||
* response. Server VAD means that the model will detect the start and end of
|
||||
* speech based on audio volume and respond at the end of user speech. Semantic VAD
|
||||
* is more advanced and uses a turn detection model (in conjunction with VAD) to
|
||||
* semantically estimate whether the user has finished speaking, then dynamically
|
||||
* sets a timeout based on this probability. For example, if user audio trails off
|
||||
* with "uhhm", the model will score a low probability of turn end and wait longer
|
||||
* for the user to continue speaking. This can be useful for more natural
|
||||
* conversations, but may have a higher latency.
|
||||
*/
|
||||
turn_detection?: TranscriptionSessionCreateParams.TurnDetection;
|
||||
}
|
||||
export declare namespace TranscriptionSessionCreateParams {
|
||||
/**
|
||||
* Configuration options for the generated client secret.
|
||||
*/
|
||||
interface ClientSecret {
|
||||
/**
|
||||
* Configuration for the ephemeral token expiration.
|
||||
*/
|
||||
expires_at?: ClientSecret.ExpiresAt;
|
||||
}
|
||||
namespace ClientSecret {
|
||||
/**
|
||||
* Configuration for the ephemeral token expiration.
|
||||
*/
|
||||
interface ExpiresAt {
|
||||
/**
|
||||
* The anchor point for the ephemeral token expiration. Only `created_at` is
|
||||
* currently supported.
|
||||
*/
|
||||
anchor?: 'created_at';
|
||||
/**
|
||||
* The number of seconds from the anchor point to the expiration. Select a value
|
||||
* between `10` and `7200`.
|
||||
*/
|
||||
seconds?: number;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Configuration for input audio noise reduction. This can be set to `null` to turn
|
||||
* off. Noise reduction filters audio added to the input audio buffer before it is
|
||||
* sent to VAD and the model. Filtering the audio can improve VAD and turn
|
||||
* detection accuracy (reducing false positives) and model performance by improving
|
||||
* perception of the input audio.
|
||||
*/
|
||||
interface InputAudioNoiseReduction {
|
||||
/**
|
||||
* Type of noise reduction. `near_field` is for close-talking microphones such as
|
||||
* headphones, `far_field` is for far-field microphones such as laptop or
|
||||
* conference room microphones.
|
||||
*/
|
||||
type?: 'near_field' | 'far_field';
|
||||
}
|
||||
/**
|
||||
* Configuration for input audio transcription. The client can optionally set the
|
||||
* language and prompt for transcription, these offer additional guidance to the
|
||||
* transcription service.
|
||||
*/
|
||||
interface InputAudioTranscription {
|
||||
/**
|
||||
* The language of the input audio. Supplying the input language in
|
||||
* [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
|
||||
* format will improve accuracy and latency.
|
||||
*/
|
||||
language?: string;
|
||||
/**
|
||||
* The model to use for transcription, current options are `gpt-4o-transcribe`,
|
||||
* `gpt-4o-mini-transcribe`, and `whisper-1`.
|
||||
*/
|
||||
model?: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe' | 'whisper-1';
|
||||
/**
|
||||
* An optional text to guide the model's style or continue a previous audio
|
||||
* segment. For `whisper-1`, the
|
||||
* [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
|
||||
* For `gpt-4o-transcribe` models, the prompt is a free text string, for example
|
||||
* "expect words related to technology".
|
||||
*/
|
||||
prompt?: string;
|
||||
}
|
||||
/**
|
||||
* Configuration for turn detection, either Server VAD or Semantic VAD. This can be
|
||||
* set to `null` to turn off, in which case the client must manually trigger model
|
||||
* response. Server VAD means that the model will detect the start and end of
|
||||
* speech based on audio volume and respond at the end of user speech. Semantic VAD
|
||||
* is more advanced and uses a turn detection model (in conjunction with VAD) to
|
||||
* semantically estimate whether the user has finished speaking, then dynamically
|
||||
* sets a timeout based on this probability. For example, if user audio trails off
|
||||
* with "uhhm", the model will score a low probability of turn end and wait longer
|
||||
* for the user to continue speaking. This can be useful for more natural
|
||||
* conversations, but may have a higher latency.
|
||||
*/
|
||||
interface TurnDetection {
|
||||
/**
|
||||
* Whether or not to automatically generate a response when a VAD stop event
|
||||
* occurs. Not available for transcription sessions.
|
||||
*/
|
||||
create_response?: boolean;
|
||||
/**
|
||||
* Used only for `semantic_vad` mode. The eagerness of the model to respond. `low`
|
||||
* will wait longer for the user to continue speaking, `high` will respond more
|
||||
* quickly. `auto` is the default and is equivalent to `medium`.
|
||||
*/
|
||||
eagerness?: 'low' | 'medium' | 'high' | 'auto';
|
||||
/**
|
||||
* Whether or not to automatically interrupt any ongoing response with output to
|
||||
* the default conversation (i.e. `conversation` of `auto`) when a VAD start event
|
||||
* occurs. Not available for transcription sessions.
|
||||
*/
|
||||
interrupt_response?: boolean;
|
||||
/**
|
||||
* Used only for `server_vad` mode. Amount of audio to include before the VAD
|
||||
* detected speech (in milliseconds). Defaults to 300ms.
|
||||
*/
|
||||
prefix_padding_ms?: number;
|
||||
/**
|
||||
* Used only for `server_vad` mode. Duration of silence to detect speech stop (in
|
||||
* milliseconds). Defaults to 500ms. With shorter values the model will respond
|
||||
* more quickly, but may jump in on short pauses from the user.
|
||||
*/
|
||||
silence_duration_ms?: number;
|
||||
/**
|
||||
* Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this
|
||||
* defaults to 0.5. A higher threshold will require louder audio to activate the
|
||||
* model, and thus might perform better in noisy environments.
|
||||
*/
|
||||
threshold?: number;
|
||||
/**
|
||||
* Type of turn detection.
|
||||
*/
|
||||
type?: 'server_vad' | 'semantic_vad';
|
||||
}
|
||||
}
|
||||
export declare namespace TranscriptionSessions {
|
||||
export { type TranscriptionSession as TranscriptionSession, type TranscriptionSessionCreateParams as TranscriptionSessionCreateParams, };
|
||||
}
|
||||
//# sourceMappingURL=transcription-sessions.d.mts.map
|
||||
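// Illustrative sketch (not part of the generated file): creating a transcription
// session with the parameters documented above (logprobs item, far-field noise
// reduction, gpt-4o-transcribe, Server VAD) and returning the ephemeral key for a
// browser client. Values are arbitrary examples.
import OpenAI from 'openai';

async function mintTranscriptionKey(): Promise<string> {
  const client = new OpenAI(); // server-side
  const transcriptionSession = await client.beta.realtime.transcriptionSessions.create({
    include: ['item.input_audio_transcription.logprobs'],
    input_audio_format: 'pcm16',
    input_audio_noise_reduction: { type: 'far_field' },
    input_audio_transcription: { model: 'gpt-4o-transcribe', language: 'en' },
    turn_detection: { type: 'server_vad', silence_duration_ms: 400 },
  });
  // The client_secret is only present when the session is created via REST.
  return transcriptionSession.client_secret.value;
}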
1
node_modules/openai/resources/beta/realtime/transcription-sessions.d.mts.map
generated
vendored
Normal file
1
node_modules/openai/resources/beta/realtime/transcription-sessions.d.mts.map
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"transcription-sessions.d.mts","sourceRoot":"","sources":["../../../src/resources/beta/realtime/transcription-sessions.ts"],"names":[],"mappings":"OAEO,EAAE,WAAW,EAAE;OACf,EAAE,UAAU,EAAE;OAEd,EAAE,cAAc,EAAE;AAEzB,qBAAa,qBAAsB,SAAQ,WAAW;IACpD;;;;;;;;;;;;;;OAcG;IACH,MAAM,CAAC,IAAI,EAAE,gCAAgC,EAAE,OAAO,CAAC,EAAE,cAAc,GAAG,UAAU,CAAC,oBAAoB,CAAC;CAO3G;AAED;;;;;;GAMG;AACH,MAAM,WAAW,oBAAoB;IACnC;;;OAGG;IACH,aAAa,EAAE,oBAAoB,CAAC,YAAY,CAAC;IAEjD;;OAEG;IACH,kBAAkB,CAAC,EAAE,MAAM,CAAC;IAE5B;;OAEG;IACH,yBAAyB,CAAC,EAAE,oBAAoB,CAAC,uBAAuB,CAAC;IAEzE;;;OAGG;IACH,UAAU,CAAC,EAAE,KAAK,CAAC,MAAM,GAAG,OAAO,CAAC,CAAC;IAErC;;;;OAIG;IACH,cAAc,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC;CACrD;AAED,yBAAiB,oBAAoB,CAAC;IACpC;;;OAGG;IACH,UAAiB,YAAY;QAC3B;;;WAGG;QACH,UAAU,EAAE,MAAM,CAAC;QAEnB;;;;WAIG;QACH,KAAK,EAAE,MAAM,CAAC;KACf;IAED;;OAEG;IACH,UAAiB,uBAAuB;QACtC;;;;WAIG;QACH,QAAQ,CAAC,EAAE,MAAM,CAAC;QAElB;;;WAGG;QACH,KAAK,CAAC,EAAE,mBAAmB,GAAG,wBAAwB,GAAG,WAAW,CAAC;QAErE;;;;;WAKG;QACH,MAAM,CAAC,EAAE,MAAM,CAAC;KACjB;IAED;;;;OAIG;IACH,UAAiB,aAAa;QAC5B;;;WAGG;QACH,iBAAiB,CAAC,EAAE,MAAM,CAAC;QAE3B;;;;WAIG;QACH,mBAAmB,CAAC,EAAE,MAAM,CAAC;QAE7B;;;;WAIG;QACH,SAAS,CAAC,EAAE,MAAM,CAAC;QAEnB;;WAEG;QACH,IAAI,CAAC,EAAE,MAAM,CAAC;KACf;CACF;AAED,MAAM,WAAW,gCAAgC;IAC/C;;OAEG;IACH,aAAa,CAAC,EAAE,gCAAgC,CAAC,YAAY,CAAC;IAE9D;;;;OAIG;IACH,OAAO,CAAC,EAAE,KAAK,CAAC,MAAM,CAAC,CAAC;IAExB;;;;OAIG;IACH,kBAAkB,CAAC,EAAE,OAAO,GAAG,WAAW,GAAG,WAAW,CAAC;IAEzD;;;;;;OAMG;IACH,2BAA2B,CAAC,EAAE,gCAAgC,CAAC,wBAAwB,CAAC;IAExF;;;;OAIG;IACH,yBAAyB,CAAC,EAAE,gCAAgC,CAAC,uBAAuB,CAAC;IAErF;;;OAGG;IACH,UAAU,CAAC,EAAE,KAAK,CAAC,MAAM,GAAG,OAAO,CAAC,CAAC;IAErC;;;;;;;;;;;OAWG;IACH,cAAc,CAAC,EAAE,gCAAgC,CAAC,aAAa,CAAC;CACjE;AAED,yBAAiB,gCAAgC,CAAC;IAChD;;OAEG;IACH,UAAiB,YAAY;QAC3B;;WAEG;QACH,UAAU,CAAC,EAAE,YAAY,CAAC,SAAS,CAAC;KACrC;IAED,UAAiB,YAAY,CAAC;QAC5B;;WAEG;QACH,UAAiB,SAAS;YACxB;;;eAGG;YACH,MAAM,CAAC,EAAE,YAAY,CAAC;YAEtB;;;eAGG;YACH,OAAO,CAAC,EAAE,MAAM,CAAC;SAClB;KACF;IAED;;;;;;OAMG;IACH,UAAiB,wBAAwB;QACvC;;;;WAIG;QACH,IAAI,CAAC,EAAE,YAAY,GAAG,WAAW,CAAC;KACnC;IAED;;;;OAIG;IACH,UAAiB,uBAAuB;QACtC;;;;WAIG;QACH,QAAQ,CAAC,EAAE,MAAM,CAAC;QAElB;;;WAGG;QACH,KAAK,CAAC,EAAE,mBAAmB,GAAG,wBAAwB,GAAG,WAAW,CAAC;QAErE;;;;;;WAMG;QACH,MAAM,CAAC,EAAE,MAAM,CAAC;KACjB;IAED;;;;;;;;;;;OAWG;IACH,UAAiB,aAAa;QAC5B;;;WAGG;QACH,eAAe,CAAC,EAAE,OAAO,CAAC;QAE1B;;;;WAIG;QACH,SAAS,CAAC,EAAE,KAAK,GAAG,QAAQ,GAAG,MAAM,GAAG,MAAM,CAAC;QAE/C;;;;WAIG;QACH,kBAAkB,CAAC,EAAE,OAAO,CAAC;QAE7B;;;WAGG;QACH,iBAAiB,CAAC,EAAE,MAAM,CAAC;QAE3B;;;;WAIG;QACH,mBAAmB,CAAC,EAAE,MAAM,CAAC;QAE7B;;;;WAIG;QACH,SAAS,CAAC,EAAE,MAAM,CAAC;QAEnB;;WAEG;QACH,IAAI,CAAC,EAAE,YAAY,GAAG,cAAc,CAAC;KACtC;CACF;AAED,MAAM,CAAC,OAAO,WAAW,qBAAqB,CAAC;IAC7C,OAAO,EACL,KAAK,oBAAoB,IAAI,oBAAoB,EACjD,KAAK,gCAAgC,IAAI,gCAAgC,GAC1E,CAAC;CACH"}
|
||||
299
node_modules/openai/resources/beta/realtime/transcription-sessions.d.ts
generated
vendored
Normal file
299
node_modules/openai/resources/beta/realtime/transcription-sessions.d.ts
generated
vendored
Normal file
@@ -0,0 +1,299 @@
|
||||
import { APIResource } from "../../../core/resource.js";
|
||||
import { APIPromise } from "../../../core/api-promise.js";
|
||||
import { RequestOptions } from "../../../internal/request-options.js";
|
||||
export declare class TranscriptionSessions extends APIResource {
|
||||
/**
|
||||
* Create an ephemeral API token for use in client-side applications with the
|
||||
* Realtime API specifically for realtime transcriptions. Can be configured with
|
||||
* the same session parameters as the `transcription_session.update` client event.
|
||||
*
|
||||
* It responds with a session object, plus a `client_secret` key which contains a
|
||||
* usable ephemeral API token that can be used to authenticate browser clients for
|
||||
* the Realtime API.
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* const transcriptionSession =
|
||||
* await client.beta.realtime.transcriptionSessions.create();
|
||||
* ```
|
||||
*/
|
||||
create(body: TranscriptionSessionCreateParams, options?: RequestOptions): APIPromise<TranscriptionSession>;
|
||||
}
|
||||
/**
|
||||
* A new Realtime transcription session configuration.
|
||||
*
|
||||
* When a session is created on the server via REST API, the session object also
|
||||
* contains an ephemeral key. Default TTL for keys is 10 minutes. This property is
|
||||
* not present when a session is updated via the WebSocket API.
|
||||
*/
|
||||
export interface TranscriptionSession {
|
||||
/**
|
||||
* Ephemeral key returned by the API. Only present when the session is created on
|
||||
* the server via REST API.
|
||||
*/
|
||||
client_secret: TranscriptionSession.ClientSecret;
|
||||
/**
|
||||
* The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`.
|
||||
*/
|
||||
input_audio_format?: string;
|
||||
/**
|
||||
* Configuration of the transcription model.
|
||||
*/
|
||||
input_audio_transcription?: TranscriptionSession.InputAudioTranscription;
|
||||
/**
|
||||
* The set of modalities the model can respond with. To disable audio, set this to
|
||||
* ["text"].
|
||||
*/
|
||||
modalities?: Array<'text' | 'audio'>;
|
||||
/**
|
||||
* Configuration for turn detection. Can be set to `null` to turn off. Server VAD
|
||||
* means that the model will detect the start and end of speech based on audio
|
||||
* volume and respond at the end of user speech.
|
||||
*/
|
||||
turn_detection?: TranscriptionSession.TurnDetection;
|
||||
}
|
||||
export declare namespace TranscriptionSession {
|
||||
/**
|
||||
* Ephemeral key returned by the API. Only present when the session is created on
|
||||
* the server via REST API.
|
||||
*/
|
||||
interface ClientSecret {
|
||||
/**
|
||||
* Timestamp for when the token expires. Currently, all tokens expire after one
|
||||
* minute.
|
||||
*/
|
||||
expires_at: number;
|
||||
/**
|
||||
* Ephemeral key usable in client environments to authenticate connections to the
|
||||
* Realtime API. Use this in client-side environments rather than a standard API
|
||||
* token, which should only be used server-side.
|
||||
*/
|
||||
value: string;
|
||||
}
|
||||
/**
|
||||
* Configuration of the transcription model.
|
||||
*/
|
||||
interface InputAudioTranscription {
|
||||
/**
|
||||
* The language of the input audio. Supplying the input language in
|
||||
* [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
|
||||
* format will improve accuracy and latency.
|
||||
*/
|
||||
language?: string;
|
||||
/**
|
||||
* The model to use for transcription. Can be `gpt-4o-transcribe`,
|
||||
* `gpt-4o-mini-transcribe`, or `whisper-1`.
|
||||
*/
|
||||
model?: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe' | 'whisper-1';
|
||||
/**
|
||||
* An optional text to guide the model's style or continue a previous audio
|
||||
* segment. The
|
||||
* [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
|
||||
* should match the audio language.
|
||||
*/
|
||||
prompt?: string;
|
||||
}
|
||||
/**
|
||||
* Configuration for turn detection. Can be set to `null` to turn off. Server VAD
|
||||
* means that the model will detect the start and end of speech based on audio
|
||||
* volume and respond at the end of user speech.
|
||||
*/
|
||||
interface TurnDetection {
|
||||
/**
|
||||
* Amount of audio to include before the VAD detected speech (in milliseconds).
|
||||
* Defaults to 300ms.
|
||||
*/
|
||||
prefix_padding_ms?: number;
|
||||
/**
|
||||
* Duration of silence to detect speech stop (in milliseconds). Defaults to 500ms.
|
||||
* With shorter values the model will respond more quickly, but may jump in on
|
||||
* short pauses from the user.
|
||||
*/
|
||||
silence_duration_ms?: number;
|
||||
/**
|
||||
* Activation threshold for VAD (0.0 to 1.0), this defaults to 0.5. A higher
|
||||
* threshold will require louder audio to activate the model, and thus might
|
||||
* perform better in noisy environments.
|
||||
*/
|
||||
threshold?: number;
|
||||
/**
|
||||
* Type of turn detection, only `server_vad` is currently supported.
|
||||
*/
|
||||
type?: string;
|
||||
}
|
||||
}
|
||||
export interface TranscriptionSessionCreateParams {
|
||||
/**
|
||||
* Configuration options for the generated client secret.
|
||||
*/
|
||||
client_secret?: TranscriptionSessionCreateParams.ClientSecret;
|
||||
/**
|
||||
* The set of items to include in the transcription. Current available items are:
|
||||
*
|
||||
* - `item.input_audio_transcription.logprobs`
|
||||
*/
|
||||
include?: Array<string>;
|
||||
/**
|
||||
* The format of input audio. Options are `pcm16`, `g711_ulaw`, or `g711_alaw`. For
|
||||
* `pcm16`, input audio must be 16-bit PCM at a 24kHz sample rate, single channel
|
||||
* (mono), and little-endian byte order.
|
||||
*/
|
||||
input_audio_format?: 'pcm16' | 'g711_ulaw' | 'g711_alaw';
|
||||
/**
|
||||
* Configuration for input audio noise reduction. This can be set to `null` to turn
|
||||
* off. Noise reduction filters audio added to the input audio buffer before it is
|
||||
* sent to VAD and the model. Filtering the audio can improve VAD and turn
|
||||
* detection accuracy (reducing false positives) and model performance by improving
|
||||
* perception of the input audio.
|
||||
*/
|
||||
input_audio_noise_reduction?: TranscriptionSessionCreateParams.InputAudioNoiseReduction;
|
||||
/**
|
||||
* Configuration for input audio transcription. The client can optionally set the
|
||||
* language and prompt for transcription, these offer additional guidance to the
|
||||
* transcription service.
|
||||
*/
|
||||
input_audio_transcription?: TranscriptionSessionCreateParams.InputAudioTranscription;
|
||||
/**
|
||||
* The set of modalities the model can respond with. To disable audio, set this to
|
||||
* ["text"].
|
||||
*/
|
||||
modalities?: Array<'text' | 'audio'>;
|
||||
/**
|
||||
* Configuration for turn detection, either Server VAD or Semantic VAD. This can be
|
||||
* set to `null` to turn off, in which case the client must manually trigger model
|
||||
* response. Server VAD means that the model will detect the start and end of
|
||||
* speech based on audio volume and respond at the end of user speech. Semantic VAD
|
||||
* is more advanced and uses a turn detection model (in conjunction with VAD) to
|
||||
* semantically estimate whether the user has finished speaking, then dynamically
|
||||
* sets a timeout based on this probability. For example, if user audio trails off
|
||||
* with "uhhm", the model will score a low probability of turn end and wait longer
|
||||
* for the user to continue speaking. This can be useful for more natural
|
||||
* conversations, but may have a higher latency.
|
||||
*/
|
||||
turn_detection?: TranscriptionSessionCreateParams.TurnDetection;
|
||||
}
|
||||
export declare namespace TranscriptionSessionCreateParams {
|
||||
/**
|
||||
* Configuration options for the generated client secret.
|
||||
*/
|
||||
interface ClientSecret {
|
||||
/**
|
||||
* Configuration for the ephemeral token expiration.
|
||||
*/
|
||||
expires_at?: ClientSecret.ExpiresAt;
|
||||
}
|
||||
namespace ClientSecret {
|
||||
/**
|
||||
* Configuration for the ephemeral token expiration.
|
||||
*/
|
||||
interface ExpiresAt {
|
||||
/**
|
||||
* The anchor point for the ephemeral token expiration. Only `created_at` is
|
||||
* currently supported.
|
||||
*/
|
||||
anchor?: 'created_at';
|
||||
/**
|
||||
* The number of seconds from the anchor point to the expiration. Select a value
|
||||
* between `10` and `7200`.
|
||||
*/
|
||||
seconds?: number;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Configuration for input audio noise reduction. This can be set to `null` to turn
|
||||
* off. Noise reduction filters audio added to the input audio buffer before it is
|
||||
* sent to VAD and the model. Filtering the audio can improve VAD and turn
|
||||
* detection accuracy (reducing false positives) and model performance by improving
|
||||
* perception of the input audio.
|
||||
*/
|
||||
interface InputAudioNoiseReduction {
|
||||
/**
|
||||
* Type of noise reduction. `near_field` is for close-talking microphones such as
|
||||
* headphones, `far_field` is for far-field microphones such as laptop or
|
||||
* conference room microphones.
|
||||
*/
|
||||
type?: 'near_field' | 'far_field';
|
||||
}
|
||||
/**
|
||||
* Configuration for input audio transcription. The client can optionally set the
|
||||
* language and prompt for transcription, these offer additional guidance to the
|
||||
* transcription service.
|
||||
*/
|
||||
interface InputAudioTranscription {
|
||||
/**
|
||||
* The language of the input audio. Supplying the input language in
|
||||
* [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
|
||||
* format will improve accuracy and latency.
|
||||
*/
|
||||
language?: string;
|
||||
/**
|
||||
* The model to use for transcription, current options are `gpt-4o-transcribe`,
|
||||
* `gpt-4o-mini-transcribe`, and `whisper-1`.
|
||||
*/
|
||||
model?: 'gpt-4o-transcribe' | 'gpt-4o-mini-transcribe' | 'whisper-1';
|
||||
/**
|
||||
* An optional text to guide the model's style or continue a previous audio
|
||||
* segment. For `whisper-1`, the
|
||||
* [prompt is a list of keywords](https://platform.openai.com/docs/guides/speech-to-text#prompting).
|
||||
* For `gpt-4o-transcribe` models, the prompt is a free text string, for example
|
||||
* "expect words related to technology".
|
||||
*/
|
||||
prompt?: string;
|
||||
}
|
||||
/**
|
||||
* Configuration for turn detection, either Server VAD or Semantic VAD. This can be
|
||||
* set to `null` to turn off, in which case the client must manually trigger model
|
||||
* response. Server VAD means that the model will detect the start and end of
|
||||
* speech based on audio volume and respond at the end of user speech. Semantic VAD
|
||||
* is more advanced and uses a turn detection model (in conjunction with VAD) to
|
||||
* semantically estimate whether the user has finished speaking, then dynamically
|
||||
* sets a timeout based on this probability. For example, if user audio trails off
|
||||
* with "uhhm", the model will score a low probability of turn end and wait longer
|
||||
* for the user to continue speaking. This can be useful for more natural
|
||||
* conversations, but may have a higher latency.
|
||||
*/
|
||||
interface TurnDetection {
|
||||
/**
|
||||
* Whether or not to automatically generate a response when a VAD stop event
|
||||
* occurs. Not available for transcription sessions.
|
||||
*/
|
||||
create_response?: boolean;
|
||||
/**
|
||||
* Used only for `semantic_vad` mode. The eagerness of the model to respond. `low`
|
||||
* will wait longer for the user to continue speaking, `high` will respond more
|
||||
* quickly. `auto` is the default and is equivalent to `medium`.
|
||||
*/
|
||||
eagerness?: 'low' | 'medium' | 'high' | 'auto';
|
||||
/**
|
||||
* Whether or not to automatically interrupt any ongoing response with output to
|
||||
* the default conversation (i.e. `conversation` of `auto`) when a VAD start event
|
||||
* occurs. Not available for transcription sessions.
|
||||
*/
|
||||
interrupt_response?: boolean;
|
||||
/**
|
||||
* Used only for `server_vad` mode. Amount of audio to include before the VAD
|
||||
* detected speech (in milliseconds). Defaults to 300ms.
|
||||
*/
|
||||
prefix_padding_ms?: number;
|
||||
/**
|
||||
* Used only for `server_vad` mode. Duration of silence to detect speech stop (in
|
||||
* milliseconds). Defaults to 500ms. With shorter values the model will respond
|
||||
* more quickly, but may jump in on short pauses from the user.
|
||||
*/
|
||||
silence_duration_ms?: number;
|
||||
/**
|
||||
* Used only for `server_vad` mode. Activation threshold for VAD (0.0 to 1.0), this
|
||||
* defaults to 0.5. A higher threshold will require louder audio to activate the
|
||||
* model, and thus might perform better in noisy environments.
|
||||
*/
|
||||
threshold?: number;
|
||||
/**
|
||||
* Type of turn detection.
|
||||
*/
|
||||
type?: 'server_vad' | 'semantic_vad';
|
||||
}
|
||||
}
|
||||
export declare namespace TranscriptionSessions {
|
||||
export { type TranscriptionSession as TranscriptionSession, type TranscriptionSessionCreateParams as TranscriptionSessionCreateParams, };
|
||||
}
|
||||
//# sourceMappingURL=transcription-sessions.d.ts.map
|
||||
1
node_modules/openai/resources/beta/realtime/transcription-sessions.d.ts.map
generated
vendored
Normal file
1
node_modules/openai/resources/beta/realtime/transcription-sessions.d.ts.map
generated
vendored
Normal file
@@ -0,0 +1 @@
|
||||
{"version":3,"file":"transcription-sessions.d.ts","sourceRoot":"","sources":["../../../src/resources/beta/realtime/transcription-sessions.ts"],"names":[],"mappings":"OAEO,EAAE,WAAW,EAAE;OACf,EAAE,UAAU,EAAE;OAEd,EAAE,cAAc,EAAE;AAEzB,qBAAa,qBAAsB,SAAQ,WAAW;IACpD;;;;;;;;;;;;;;OAcG;IACH,MAAM,CAAC,IAAI,EAAE,gCAAgC,EAAE,OAAO,CAAC,EAAE,cAAc,GAAG,UAAU,CAAC,oBAAoB,CAAC;CAO3G;AAED;;;;;;GAMG;AACH,MAAM,WAAW,oBAAoB;IACnC;;;OAGG;IACH,aAAa,EAAE,oBAAoB,CAAC,YAAY,CAAC;IAEjD;;OAEG;IACH,kBAAkB,CAAC,EAAE,MAAM,CAAC;IAE5B;;OAEG;IACH,yBAAyB,CAAC,EAAE,oBAAoB,CAAC,uBAAuB,CAAC;IAEzE;;;OAGG;IACH,UAAU,CAAC,EAAE,KAAK,CAAC,MAAM,GAAG,OAAO,CAAC,CAAC;IAErC;;;;OAIG;IACH,cAAc,CAAC,EAAE,oBAAoB,CAAC,aAAa,CAAC;CACrD;AAED,yBAAiB,oBAAoB,CAAC;IACpC;;;OAGG;IACH,UAAiB,YAAY;QAC3B;;;WAGG;QACH,UAAU,EAAE,MAAM,CAAC;QAEnB;;;;WAIG;QACH,KAAK,EAAE,MAAM,CAAC;KACf;IAED;;OAEG;IACH,UAAiB,uBAAuB;QACtC;;;;WAIG;QACH,QAAQ,CAAC,EAAE,MAAM,CAAC;QAElB;;;WAGG;QACH,KAAK,CAAC,EAAE,mBAAmB,GAAG,wBAAwB,GAAG,WAAW,CAAC;QAErE;;;;;WAKG;QACH,MAAM,CAAC,EAAE,MAAM,CAAC;KACjB;IAED;;;;OAIG;IACH,UAAiB,aAAa;QAC5B;;;WAGG;QACH,iBAAiB,CAAC,EAAE,MAAM,CAAC;QAE3B;;;;WAIG;QACH,mBAAmB,CAAC,EAAE,MAAM,CAAC;QAE7B;;;;WAIG;QACH,SAAS,CAAC,EAAE,MAAM,CAAC;QAEnB;;WAEG;QACH,IAAI,CAAC,EAAE,MAAM,CAAC;KACf;CACF;AAED,MAAM,WAAW,gCAAgC;IAC/C;;OAEG;IACH,aAAa,CAAC,EAAE,gCAAgC,CAAC,YAAY,CAAC;IAE9D;;;;OAIG;IACH,OAAO,CAAC,EAAE,KAAK,CAAC,MAAM,CAAC,CAAC;IAExB;;;;OAIG;IACH,kBAAkB,CAAC,EAAE,OAAO,GAAG,WAAW,GAAG,WAAW,CAAC;IAEzD;;;;;;OAMG;IACH,2BAA2B,CAAC,EAAE,gCAAgC,CAAC,wBAAwB,CAAC;IAExF;;;;OAIG;IACH,yBAAyB,CAAC,EAAE,gCAAgC,CAAC,uBAAuB,CAAC;IAErF;;;OAGG;IACH,UAAU,CAAC,EAAE,KAAK,CAAC,MAAM,GAAG,OAAO,CAAC,CAAC;IAErC;;;;;;;;;;;OAWG;IACH,cAAc,CAAC,EAAE,gCAAgC,CAAC,aAAa,CAAC;CACjE;AAED,yBAAiB,gCAAgC,CAAC;IAChD;;OAEG;IACH,UAAiB,YAAY;QAC3B;;WAEG;QACH,UAAU,CAAC,EAAE,YAAY,CAAC,SAAS,CAAC;KACrC;IAED,UAAiB,YAAY,CAAC;QAC5B;;WAEG;QACH,UAAiB,SAAS;YACxB;;;eAGG;YACH,MAAM,CAAC,EAAE,YAAY,CAAC;YAEtB;;;eAGG;YACH,OAAO,CAAC,EAAE,MAAM,CAAC;SAClB;KACF;IAED;;;;;;OAMG;IACH,UAAiB,wBAAwB;QACvC;;;;WAIG;QACH,IAAI,CAAC,EAAE,YAAY,GAAG,WAAW,CAAC;KACnC;IAED;;;;OAIG;IACH,UAAiB,uBAAuB;QACtC;;;;WAIG;QACH,QAAQ,CAAC,EAAE,MAAM,CAAC;QAElB;;;WAGG;QACH,KAAK,CAAC,EAAE,mBAAmB,GAAG,wBAAwB,GAAG,WAAW,CAAC;QAErE;;;;;;WAMG;QACH,MAAM,CAAC,EAAE,MAAM,CAAC;KACjB;IAED;;;;;;;;;;;OAWG;IACH,UAAiB,aAAa;QAC5B;;;WAGG;QACH,eAAe,CAAC,EAAE,OAAO,CAAC;QAE1B;;;;WAIG;QACH,SAAS,CAAC,EAAE,KAAK,GAAG,QAAQ,GAAG,MAAM,GAAG,MAAM,CAAC;QAE/C;;;;WAIG;QACH,kBAAkB,CAAC,EAAE,OAAO,CAAC;QAE7B;;;WAGG;QACH,iBAAiB,CAAC,EAAE,MAAM,CAAC;QAE3B;;;;WAIG;QACH,mBAAmB,CAAC,EAAE,MAAM,CAAC;QAE7B;;;;WAIG;QACH,SAAS,CAAC,EAAE,MAAM,CAAC;QAEnB;;WAEG;QACH,IAAI,CAAC,EAAE,YAAY,GAAG,cAAc,CAAC;KACtC;CACF;AAED,MAAM,CAAC,OAAO,WAAW,qBAAqB,CAAC;IAC7C,OAAO,EACL,KAAK,oBAAoB,IAAI,oBAAoB,EACjD,KAAK,gCAAgC,IAAI,gCAAgC,GAC1E,CAAC;CACH"}
32
node_modules/openai/resources/beta/realtime/transcription-sessions.js
generated
vendored
Normal file
@@ -0,0 +1,32 @@
"use strict";
|
||||
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.TranscriptionSessions = void 0;
|
||||
const resource_1 = require("../../../core/resource.js");
|
||||
const headers_1 = require("../../../internal/headers.js");
|
||||
class TranscriptionSessions extends resource_1.APIResource {
|
||||
/**
|
||||
* Create an ephemeral API token for use in client-side applications with the
|
||||
* Realtime API specifically for realtime transcriptions. Can be configured with
|
||||
* the same session parameters as the `transcription_session.update` client event.
|
||||
*
|
||||
* It responds with a session object, plus a `client_secret` key which contains a
|
||||
* usable ephemeral API token that can be used to authenticate browser clients for
|
||||
* the Realtime API.
|
||||
*
|
||||
* @example
|
||||
* ```ts
|
||||
* const transcriptionSession =
|
||||
* await client.beta.realtime.transcriptionSessions.create();
|
||||
* ```
|
||||
*/
|
||||
create(body, options) {
|
||||
return this._client.post('/realtime/transcription_sessions', {
|
||||
body,
|
||||
...options,
|
||||
headers: (0, headers_1.buildHeaders)([{ 'OpenAI-Beta': 'assistants=v2' }, options?.headers]),
|
||||
});
|
||||
}
|
||||
}
|
||||
exports.TranscriptionSessions = TranscriptionSessions;
|
||||
//# sourceMappingURL=transcription-sessions.js.map
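As the docstring above notes, `create` responds with a session object plus a `client_secret` holding a short-lived token for browser clients. A brief server-side sketch of that flow; the `client_secret.value` field is an assumption based on the session types that accompany this module, and the helper name is hypothetical:

```ts
import OpenAI from 'openai';

const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

// Hypothetical helper: mint an ephemeral token on the server so the browser
// never handles the long-lived API key.
async function mintTranscriptionToken(): Promise<string> {
  const transcriptionSession = await client.beta.realtime.transcriptionSessions.create();
  return transcriptionSession.client_secret.value; // ephemeral token, per the docstring
}
```

The returned value would then be handed to the browser client, which uses it in place of the real API key when connecting to the Realtime transcription endpoint.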
1
node_modules/openai/resources/beta/realtime/transcription-sessions.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"transcription-sessions.js","sourceRoot":"","sources":["../../../src/resources/beta/realtime/transcription-sessions.ts"],"names":[],"mappings":";AAAA,sFAAsF;;;AAEtF,wDAAqD;AAErD,0DAAyD;AAGzD,MAAa,qBAAsB,SAAQ,sBAAW;IACpD;;;;;;;;;;;;;;OAcG;IACH,MAAM,CAAC,IAAsC,EAAE,OAAwB;QACrE,OAAO,IAAI,CAAC,OAAO,CAAC,IAAI,CAAC,kCAAkC,EAAE;YAC3D,IAAI;YACJ,GAAG,OAAO;YACV,OAAO,EAAE,IAAA,sBAAY,EAAC,CAAC,EAAE,aAAa,EAAE,eAAe,EAAE,EAAE,OAAO,EAAE,OAAO,CAAC,CAAC;SAC9E,CAAC,CAAC;IACL,CAAC;CACF;AAvBD,sDAuBC"}
28
node_modules/openai/resources/beta/realtime/transcription-sessions.mjs
generated
vendored
Normal file
@@ -0,0 +1,28 @@
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import { APIResource } from "../../../core/resource.mjs";
import { buildHeaders } from "../../../internal/headers.mjs";
export class TranscriptionSessions extends APIResource {
    /**
     * Create an ephemeral API token for use in client-side applications with the
     * Realtime API specifically for realtime transcriptions. Can be configured with
     * the same session parameters as the `transcription_session.update` client event.
     *
     * It responds with a session object, plus a `client_secret` key which contains a
     * usable ephemeral API token that can be used to authenticate browser clients for
     * the Realtime API.
     *
     * @example
     * ```ts
     * const transcriptionSession =
     *   await client.beta.realtime.transcriptionSessions.create();
     * ```
     */
    create(body, options) {
        return this._client.post('/realtime/transcription_sessions', {
            body,
            ...options,
            headers: buildHeaders([{ 'OpenAI-Beta': 'assistants=v2' }, options?.headers]),
        });
    }
}
//# sourceMappingURL=transcription-sessions.mjs.map
1
node_modules/openai/resources/beta/realtime/transcription-sessions.mjs.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"transcription-sessions.mjs","sourceRoot":"","sources":["../../../src/resources/beta/realtime/transcription-sessions.ts"],"names":[],"mappings":"AAAA,sFAAsF;OAE/E,EAAE,WAAW,EAAE;OAEf,EAAE,YAAY,EAAE;AAGvB,MAAM,OAAO,qBAAsB,SAAQ,WAAW;IACpD;;;;;;;;;;;;;;OAcG;IACH,MAAM,CAAC,IAAsC,EAAE,OAAwB;QACrE,OAAO,IAAI,CAAC,OAAO,CAAC,IAAI,CAAC,kCAAkC,EAAE;YAC3D,IAAI;YACJ,GAAG,OAAO;YACV,OAAO,EAAE,YAAY,CAAC,CAAC,EAAE,aAAa,EAAE,eAAe,EAAE,EAAE,OAAO,EAAE,OAAO,CAAC,CAAC;SAC9E,CAAC,CAAC;IACL,CAAC;CACF"}