OpenAiLanguageModel.ts overview
Since v1.0.0
Exports Grouped by Category
AI Models
model
Signature
declare const model: (
  model: (string & {}) | Model,
  config?: Omit<Config.Service, "model">
) => AiModel.AiModel<AiLanguageModel.AiLanguageModel | Tokenizer.Tokenizer, OpenAiClient>
Since v1.0.0
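Example

A minimal sketch of constructing and using the model. The AiLanguageModel.generateText call, the response.text accessor, the yield*/.use pattern, and the OpenAiClient.layerConfig and NodeHttpClient.layerUndici layers are assumptions drawn from the wider @effect/ai packages rather than definitions in this file; treat this as orientation, not a verbatim recipe.

import { AiLanguageModel } from "@effect/ai"
import { OpenAiClient, OpenAiLanguageModel } from "@effect/ai-openai"
import { NodeHttpClient } from "@effect/platform-node"
import { Config, Effect } from "effect"

// Build an AiModel for a given model id, optionally with default config.
const Gpt4o = OpenAiLanguageModel.model("gpt-4o", { temperature: 0.7 })

const program = Effect.gen(function* () {
  // Resolving the AiModel yields a provider for AiLanguageModel effects.
  const gpt4o = yield* Gpt4o
  const response = yield* gpt4o.use(
    AiLanguageModel.generateText({ prompt: "Tell me a joke" })
  )
  console.log(response.text)
})

// The model still requires an OpenAiClient, which itself needs an HTTP client.
program.pipe(
  Effect.provide(OpenAiClient.layerConfig({ apiKey: Config.redacted("OPENAI_API_KEY") })),
  Effect.provide(NodeHttpClient.layerUndici),
  Effect.runPromise
)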
Configuration
withConfigOverride
Signature
declare const withConfigOverride: {
  (overrides: Config.Service): <A, E, R>(self: Effect.Effect<A, E, R>) => Effect.Effect<A, E, R>
  <A, E, R>(self: Effect.Effect<A, E, R>, overrides: Config.Service): Effect.Effect<A, E, R>
}
Since v1.0.0
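Example

Applies the given Config.Service overrides only for the duration of the wrapped effect, leaving other requests on the defaults supplied at model construction. A hedged sketch, reusing the assumed generateText call from the example above:

import { AiLanguageModel } from "@effect/ai"
import { OpenAiLanguageModel } from "@effect/ai-openai"

const creative = AiLanguageModel.generateText({ prompt: "Write a haiku" }).pipe(
  // Raise the sampling temperature for this call only.
  OpenAiLanguageModel.withConfigOverride({ temperature: 1.2 })
)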
Context
Config (class)
Signature
declare class Config
Since v1.0.0
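Example

Config is the context tag under which the per-request configuration (the Config.Service shape documented under utils below) is stored. Assuming it follows the standard Context.Tag pattern, a service can also be provided directly; a sketch:

import { OpenAiLanguageModel } from "@effect/ai-openai"
import { Effect } from "effect"

// Similar in spirit to withConfigOverride: supply a Config service for the
// duration of an effect (assumes Config is a Context.Tag subclass).
const withSeed = <A, E, R>(self: Effect.Effect<A, E, R>) =>
  Effect.provideService(self, OpenAiLanguageModel.Config, { seed: 42 })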
ProviderMetadata (class)
Signature
declare class ProviderMetadata
Since v1.0.0
Models
Model (type alias)
Signature
type Model = typeof Generated.ModelIdsSharedEnum.Encoded
Since v1.0.0
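Example

Model is the encoded union of model identifiers from the generated OpenAI schema. Note that the model constructor above accepts (string & {}) | Model, so unlisted ids still type-check while known ids keep editor autocomplete. Assuming "gpt-4o" is among the generated ids:

import type { OpenAiLanguageModel } from "@effect/ai-openai"

// A known id from the generated enum (assumed member).
const id: OpenAiLanguageModel.Model = "gpt-4o"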
utils
Config (namespace)
Since v1.0.0
Service (interface)
Signature
export interface Service
  extends Simplify<
    Partial<
      Omit<
        typeof Generated.CreateChatCompletionRequest.Encoded,
        "messages" | "tools" | "tool_choice" | "stream" | "stream_options" | "functions"
      >
    >
  > {}
Since v1.0.0
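Example

The service shape is the OpenAI chat completion request with every field made optional and the fields the language model manages itself (messages, tools, tool_choice, and the streaming controls) omitted. A sketch of a typical value; key names follow OpenAI's REST schema in its encoded (snake_case) form, and which keys exist depends on the generated types:

import type { OpenAiLanguageModel } from "@effect/ai-openai"

// All fields optional; these particular keys are assumed from the OpenAI API.
const config: OpenAiLanguageModel.Config.Service = {
  temperature: 0.2,
  top_p: 0.9,
  max_completion_tokens: 1024,
  seed: 42
}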
ProviderMetadata (namespace)
Since v1.0.0
Service (interface)
Signature
export interface Service {
  /**
   * Specifies the latency tier that was used for processing the request.
   */
  readonly serviceTier?: string
  /**
   * This fingerprint represents the backend configuration that the model
   * executes with.
   *
   * Can be used in conjunction with the seed request parameter to understand
   * when backend changes have been made that might impact determinism.
   */
  readonly systemFingerprint: string
  /**
   * When using predicted outputs, the number of tokens in the prediction
   * that appeared in the completion.
   */
  readonly acceptedPredictionTokens: number
  /**
   * When using predicted outputs, the number of tokens in the prediction
   * that did not appear in the completion. However, like reasoning tokens,
   * these tokens are still counted in the total completion tokens for
   * purposes of billing, output, and context window limits.
   */
  readonly rejectedPredictionTokens: number
  /**
   * Audio tokens present in the prompt.
   */
  readonly inputAudioTokens: number
  /**
   * Audio tokens generated by the model.
   */
  readonly outputAudioTokens: number
}
Since v1.0.0
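Example

These fields carry OpenAI-specific response details (latency tier, backend fingerprint, prediction and audio token counts) that have no provider-agnostic equivalent. A small consumer using only the documented fields; how the metadata is extracted from a response is left open here (see the ProviderMetadata tag above):

import type { OpenAiLanguageModel } from "@effect/ai-openai"

// Summarize OpenAI-specific usage details from a metadata value.
const summarize = (meta: OpenAiLanguageModel.ProviderMetadata.Service): string =>
  [
    `tier=${meta.serviceTier ?? "default"}`,
    `fingerprint=${meta.systemFingerprint}`,
    `predicted tokens: ${meta.acceptedPredictionTokens} accepted, ${meta.rejectedPredictionTokens} rejected (rejected still bill as completion tokens)`,
    `audio tokens: ${meta.inputAudioTokens} in, ${meta.outputAudioTokens} out`
  ].join("; ")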