import * as Core from 'openai/core';
import { type CompletionUsage } from 'openai/resources/completions';
import { type ChatCompletion, type ChatCompletionMessage, type ChatCompletionMessageParam, type ChatCompletionCreateParams } from 'openai/resources/chat/completions';
import { type BaseFunctionsArgs } from "./RunnableFunction.js";
import { ChatCompletionFunctionRunnerParams, ChatCompletionToolRunnerParams } from "./ChatCompletionRunner.js";
import { ChatCompletionStreamingFunctionRunnerParams, ChatCompletionStreamingToolRunnerParams } from "./ChatCompletionStreamingRunner.js";
import { BaseEvents, EventStream } from "./EventStream.js";
import { ParsedChatCompletion } from "../resources/beta/chat/completions.js";
import OpenAI from "../index.js";
export interface RunnerOptions extends Core.RequestOptions {
    /** How many requests to make before canceling. Default 10. */
    maxChatCompletions?: number;
}
export declare class AbstractChatCompletionRunner<EventTypes extends AbstractChatCompletionRunnerEvents, ParsedT> extends EventStream<EventTypes> {
    #private;
    protected _chatCompletions: ParsedChatCompletion<ParsedT>[];
    messages: ChatCompletionMessageParam[];
    protected _addChatCompletion(this: AbstractChatCompletionRunner<AbstractChatCompletionRunnerEvents, ParsedT>, chatCompletion: ParsedChatCompletion<ParsedT>): ParsedChatCompletion<ParsedT>;
    protected _addMessage(this: AbstractChatCompletionRunner<AbstractChatCompletionRunnerEvents, ParsedT>, message: ChatCompletionMessageParam, emit?: boolean): void;
    /**
     * @returns a promise that resolves with the final ChatCompletion, or rejects
     * if an error occurred or the stream ended prematurely without producing a ChatCompletion.
     */
    finalChatCompletion(): Promise<ParsedChatCompletion<ParsedT>>;
    /**
     * @returns a promise that resolves with the content of the final ChatCompletionMessage, or rejects
     * if an error occurred or the stream ended prematurely without producing a ChatCompletionMessage.
     */
    finalContent(): Promise<string | null>;
    /**
     * @returns a promise that resolves with the final assistant ChatCompletionMessage response,
     * or rejects if an error occurred or the stream ended prematurely without producing a ChatCompletionMessage.
     */
    finalMessage(): Promise<ChatCompletionMessage>;
    /**
     * @returns a promise that resolves with the content of the final FunctionCall, or rejects
     * if an error occurred or the stream ended prematurely without producing a ChatCompletionMessage.
     */
    finalFunctionCall(): Promise<ChatCompletionMessage.FunctionCall | undefined>;
    finalFunctionCallResult(): Promise<string | undefined>;
    totalUsage(): Promise<CompletionUsage>;
    allChatCompletions(): ChatCompletion[];
    protected _emitFinal(this: AbstractChatCompletionRunner<AbstractChatCompletionRunnerEvents, ParsedT>): void;
    protected _createChatCompletion(client: OpenAI, params: ChatCompletionCreateParams, options?: Core.RequestOptions): Promise<ParsedChatCompletion<ParsedT>>;
    protected _runChatCompletion(client: OpenAI, params: ChatCompletionCreateParams, options?: Core.RequestOptions): Promise<ChatCompletion>;
    protected _runFunctions<FunctionsArgs extends BaseFunctionsArgs>(client: OpenAI, params: ChatCompletionFunctionRunnerParams<FunctionsArgs> | ChatCompletionStreamingFunctionRunnerParams<FunctionsArgs>, options?: RunnerOptions): Promise<void>;
    protected _runTools<FunctionsArgs extends BaseFunctionsArgs>(client: OpenAI, params: ChatCompletionToolRunnerParams<FunctionsArgs> | ChatCompletionStreamingToolRunnerParams<FunctionsArgs>, options?: RunnerOptions): Promise<void>;
}
export interface AbstractChatCompletionRunnerEvents extends BaseEvents {
    functionCall: (functionCall: ChatCompletionMessage.FunctionCall) => void;
    message: (message: ChatCompletionMessageParam) => void;
    chatCompletion: (completion: ChatCompletion) => void;
    finalContent: (contentSnapshot: string) => void;
    finalMessage: (message: ChatCompletionMessageParam) => void;
    finalChatCompletion: (completion: ChatCompletion) => void;
    finalFunctionCall: (functionCall: ChatCompletionMessage.FunctionCall) => void;
    functionCallResult: (content: string) => void;
    finalFunctionCallResult: (content: string) => void;
    totalUsage: (usage: CompletionUsage) => void;
}
//# sourceMappingURL=AbstractChatCompletionRunner.d.ts.map
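/*
 * Usage sketch (illustrative only, not part of these declarations): concrete runners such as
 * the one returned by `client.beta.chat.completions.runTools(...)` extend
 * AbstractChatCompletionRunner, so they emit the events declared in
 * AbstractChatCompletionRunnerEvents and expose the final* helpers above. The model name,
 * tool name, and tool implementation below are assumptions made for this example only.
 *
 *   import OpenAI from 'openai';
 *
 *   const client = new OpenAI();
 *
 *   // Hypothetical tool used only for this sketch.
 *   function getWeather(args: { city: string }) {
 *     return { city: args.city, forecast: 'sunny' };
 *   }
 *
 *   const runner = client.beta.chat.completions.runTools({
 *     model: 'gpt-4o', // assumed model name
 *     messages: [{ role: 'user', content: 'What is the weather in Paris?' }],
 *     tools: [
 *       {
 *         type: 'function',
 *         function: {
 *           function: getWeather,
 *           parse: JSON.parse, // arguments arrive as a JSON string; parse before invoking
 *           description: 'Look up the weather for a city',
 *           parameters: { type: 'object', properties: { city: { type: 'string' } } },
 *         },
 *       },
 *     ],
 *   });
 *
 *   // Events declared in AbstractChatCompletionRunnerEvents.
 *   runner.on('message', (message) => console.log('message:', message));
 *   runner.on('functionCall', (functionCall) => console.log('function call:', functionCall));
 *   runner.on('totalUsage', (usage) => console.log('total tokens:', usage.total_tokens));
 *
 *   // Resolves once the run finishes, mirroring finalContent() above.
 *   const content = await runner.finalContent();
 *   console.log(content);
 */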