agsamantha/node_modules/@langchain/openai/dist/chat_models.d.ts

import { type ClientOptions, OpenAI as OpenAIClient } from "openai";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { AIMessageChunk, type BaseMessage } from "@langchain/core/messages";
import { ChatGenerationChunk, type ChatResult } from "@langchain/core/outputs";
import { BaseChatModel, BindToolsInput, LangSmithParams, type BaseChatModelParams } from "@langchain/core/language_models/chat_models";
import { type BaseFunctionCallOptions, type BaseLanguageModelInput, type StructuredOutputMethodOptions, type StructuredOutputMethodParams } from "@langchain/core/language_models/base";
import { z } from "zod";
import { Runnable } from "@langchain/core/runnables";
import type { AzureOpenAIInput, OpenAICallOptions, OpenAIChatInput, OpenAICoreRequestOptions, LegacyOpenAIInput, ChatOpenAIResponseFormat } from "./types.js";
import { OpenAIToolChoice } from "./utils/openai.js";
export type { AzureOpenAIInput, OpenAICallOptions, OpenAIChatInput };
interface TokenUsage {
    completionTokens?: number;
    promptTokens?: number;
    totalTokens?: number;
}
interface OpenAILLMOutput {
    tokenUsage: TokenUsage;
}
type OpenAIRoleEnum = "system" | "assistant" | "user" | "function" | "tool";
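/**
 * Map a LangChain message to the corresponding OpenAI chat role. A minimal
 * usage sketch (assumes message classes imported from
 * `@langchain/core/messages`):
 *
 * ```typescript
 * messageToOpenAIRole(new HumanMessage("Hello")); // "user"
 * messageToOpenAIRole(new AIMessage("Hi there")); // "assistant"
 * ```
 */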
export declare function messageToOpenAIRole(message: BaseMessage): OpenAIRoleEnum;
export declare function _convertMessagesToOpenAIParams(messages: BaseMessage[]): OpenAIClient.Chat.Completions.ChatCompletionMessageParam[];
type ChatOpenAIToolType = BindToolsInput | OpenAIClient.ChatCompletionTool;
export interface ChatOpenAIStructuredOutputMethodOptions<IncludeRaw extends boolean> extends StructuredOutputMethodOptions<IncludeRaw> {
    /**
     * If `true` and `method` is "function_calling", model output is
     * guaranteed to exactly match the schema. If `true`, the input schema
     * will also be validated according to
     * https://platform.openai.com/docs/guides/structured-outputs/supported-schemas.
     * If `false`, neither the input schema nor the model output will be
     * validated.
     * If `undefined`, the `strict` argument will not be passed to the model.
     *
     * @version 0.2.6
     * @note Planned breaking change: as of version `0.3.0`, `strict` will
     * default to `true` when `method` is "function_calling".
     */
    strict?: boolean;
}
export interface ChatOpenAICallOptions extends OpenAICallOptions, BaseFunctionCallOptions {
    tools?: ChatOpenAIToolType[];
    tool_choice?: OpenAIToolChoice;
    promptIndex?: number;
    response_format?: ChatOpenAIResponseFormat;
    seed?: number;
    /**
     * Additional options to pass to streamed completions.
     * If provided, this takes precedence over "streamUsage" set at
     * initialization time.
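     *
     * A minimal sketch (assumes an existing `ChatOpenAI` instance named `llm`):
     *
     * ```typescript
     * // Emit a final chunk containing token usage for this stream
     * const stream = await llm.stream("Hello", {
     *   stream_options: { include_usage: true },
     * });
     * ```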
     */
    stream_options?: {
        /**
         * Whether or not to include token usage in the stream.
         * If set to `true`, this will include an additional
         * chunk at the end of the stream with the token usage.
         */
        include_usage: boolean;
    };
    /**
     * Whether to allow the model to call multiple tools in a single
     * response. Set to `false` to disable parallel tool calls.
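     *
     * A minimal sketch (hypothetical `llmWithTools` created via `bindTools`):
     *
     * ```typescript
     * // Force at most one tool call per response
     * const res = await llmWithTools.invoke(input, {
     *   parallel_tool_calls: false,
     * });
     * ```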
     */
    parallel_tool_calls?: boolean;
    /**
     * If `true`, model output is guaranteed to exactly match the JSON Schema
     * provided in the tool definition. If `true`, the input schema will also
     * be validated according to
     * https://platform.openai.com/docs/guides/structured-outputs/supported-schemas.
     *
     * If `false`, neither the input schema nor the model output will be
     * validated.
     *
     * If `undefined`, the `strict` argument will not be passed to the model.
     *
     * @version 0.2.6
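     *
     * A minimal sketch (hypothetical tool definition `myTool`):
     *
     * ```typescript
     * const llmStrictTools = llm.bindTools([myTool], { strict: true });
     * ```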
     */
    strict?: boolean;
}
export interface ChatOpenAIFields extends Partial<OpenAIChatInput>, Partial<AzureOpenAIInput>, BaseChatModelParams {
    configuration?: ClientOptions & LegacyOpenAIInput;
}
/**
* OpenAI chat model integration.
*
* Setup:
* Install `@langchain/openai` and set an environment variable named `OPENAI_API_KEY`.
*
* ```bash
* npm install @langchain/openai
* export OPENAI_API_KEY="your-api-key"
* ```
*
* ## [Constructor args](https://api.js.langchain.com/classes/langchain_openai.ChatOpenAI.html#constructor)
*
* ## [Runtime args](https://api.js.langchain.com/interfaces/langchain_openai.ChatOpenAICallOptions.html)
*
* Runtime args can be passed as the second argument to any of the base runnable methods `.invoke`, `.stream`, `.batch`, etc.
* They can also be passed via `.bind`, or the second arg in `.bindTools`, as shown in the examples below:
*
* ```typescript
* // When calling `.bind`, call options should be passed via the first argument
* const llmWithArgsBound = llm.bind({
*   stop: ["\n"],
*   tools: [...],
* });
*
* // When calling `.bindTools`, call options should be passed via the second argument
* const llmWithTools = llm.bindTools(
*   [...],
*   {
*     tool_choice: "auto",
*   }
* );
* ```
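*
* Call options can also be passed directly as the second argument to `.invoke` (a minimal sketch, assuming the `llm` instance from the examples below):
*
* ```typescript
* // One-off stop sequence for this call only; nothing is persisted on the model
* const perCallResult = await llm.invoke("Count to five", { stop: ["\n"] });
* ```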
*
* ## Examples
*
* <details open>
* <summary><strong>Instantiate</strong></summary>
*
* ```typescript
* import { ChatOpenAI } from '@langchain/openai';
*
* const llm = new ChatOpenAI({
* model: "gpt-4o",
* temperature: 0,
* maxTokens: undefined,
* timeout: undefined,
* maxRetries: 2,
* // apiKey: "...",
* // baseUrl: "...",
* // organization: "...",
* // other params...
* });
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Invoking</strong></summary>
*
* ```typescript
* const input = `Translate "I love programming" into French.`;
*
* // Models also accept a list of chat messages or a formatted prompt
* const result = await llm.invoke(input);
* console.log(result);
* ```
*
* ```txt
* AIMessage {
* "id": "chatcmpl-9u4Mpu44CbPjwYFkTbeoZgvzB00Tz",
* "content": "J'adore la programmation.",
* "response_metadata": {
* "tokenUsage": {
* "completionTokens": 5,
* "promptTokens": 28,
* "totalTokens": 33
* },
* "finish_reason": "stop",
* "system_fingerprint": "fp_3aa7262c27"
* },
* "usage_metadata": {
* "input_tokens": 28,
* "output_tokens": 5,
* "total_tokens": 33
* }
* }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Streaming Chunks</strong></summary>
*
* ```typescript
* for await (const chunk of await llm.stream(input)) {
*   console.log(chunk);
* }
* ```
*
* ```txt
* AIMessageChunk {
*   "id": "chatcmpl-9u4NWB7yUeHCKdLr6jP3HpaOYHTqs",
*   "content": ""
* }
* AIMessageChunk {
*   "content": "J"
* }
* AIMessageChunk {
*   "content": "'adore"
* }
* AIMessageChunk {
*   "content": " la"
* }
* AIMessageChunk {
*   "content": " programmation"
* }
* AIMessageChunk {
*   "content": "."
* }
* AIMessageChunk {
*   "content": "",
*   "response_metadata": {
*     "finish_reason": "stop",
*     "system_fingerprint": "fp_c9aa9c0491"
*   }
* }
* AIMessageChunk {
*   "content": "",
*   "usage_metadata": {
*     "input_tokens": 28,
*     "output_tokens": 5,
*     "total_tokens": 33
*   }
* }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Aggregate Streamed Chunks</strong></summary>
*
* ```typescript
* import { AIMessageChunk } from '@langchain/core/messages';
* import { concat } from '@langchain/core/utils/stream';
*
* const stream = await llm.stream(input);
* let full: AIMessageChunk | undefined;
* for await (const chunk of stream) {
*   full = !full ? chunk : concat(full, chunk);
* }
* console.log(full);
* ```
*
* ```txt
* AIMessageChunk {
* "id": "chatcmpl-9u4PnX6Fy7OmK46DASy0bH6cxn5Xu",
* "content": "J'adore la programmation.",
* "response_metadata": {
* "prompt": 0,
* "completion": 0,
* "finish_reason": "stop",
* },
* "usage_metadata": {
* "input_tokens": 28,
* "output_tokens": 5,
* "total_tokens": 33
* }
* }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Bind tools</strong></summary>
*
* ```typescript
* import { z } from 'zod';
*
* const GetWeather = {
*   name: "GetWeather",
*   description: "Get the current weather in a given location",
*   schema: z.object({
*     location: z.string().describe("The city and state, e.g. San Francisco, CA")
*   }),
* }
*
* const GetPopulation = {
*   name: "GetPopulation",
*   description: "Get the current population in a given location",
*   schema: z.object({
*     location: z.string().describe("The city and state, e.g. San Francisco, CA")
*   }),
* }
*
* const llmWithTools = llm.bindTools(
*   [GetWeather, GetPopulation],
*   {
*     // strict: true // enforce that tool args match the schema exactly
*   }
* );
* const aiMsg = await llmWithTools.invoke(
*   "Which city is hotter today and which is bigger: LA or NY?"
* );
* console.log(aiMsg.tool_calls);
* ```
*
* ```txt
* [
*   {
*     name: 'GetWeather',
*     args: { location: 'Los Angeles, CA' },
*     type: 'tool_call',
*     id: 'call_uPU4FiFzoKAtMxfmPnfQL6UK'
*   },
*   {
*     name: 'GetWeather',
*     args: { location: 'New York, NY' },
*     type: 'tool_call',
*     id: 'call_UNkEwuQsHrGYqgDQuH9nPAtX'
*   },
*   {
*     name: 'GetPopulation',
*     args: { location: 'Los Angeles, CA' },
*     type: 'tool_call',
*     id: 'call_kL3OXxaq9OjIKqRTpvjaCH14'
*   },
*   {
*     name: 'GetPopulation',
*     args: { location: 'New York, NY' },
*     type: 'tool_call',
*     id: 'call_s9KQB1UWj45LLGaEnjz0179q'
*   }
* ]
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Structured Output</strong></summary>
*
* ```typescript
* import { z } from 'zod';
*
* const Joke = z.object({
*   setup: z.string().describe("The setup of the joke"),
*   punchline: z.string().describe("The punchline to the joke"),
*   rating: z.number().optional().describe("How funny the joke is, from 1 to 10")
* }).describe('Joke to tell user.');
*
* const structuredLlm = llm.withStructuredOutput(Joke, {
*   name: "Joke",
*   strict: true, // Optionally enable OpenAI structured outputs
* });
* const jokeResult = await structuredLlm.invoke("Tell me a joke about cats");
* console.log(jokeResult);
* ```
*
* ```txt
* {
*   setup: 'Why was the cat sitting on the computer?',
*   punchline: 'Because it wanted to keep an eye on the mouse!',
*   rating: 7
* }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>JSON Object Response Format</strong></summary>
*
* ```typescript
* const jsonLlm = llm.bind({ response_format: { type: "json_object" } });
* const jsonLlmAiMsg = await jsonLlm.invoke(
* "Return a JSON object with key 'randomInts' and a value of 10 random ints in [0-99]"
* );
* console.log(jsonLlmAiMsg.content);
* ```
*
* ```txt
* {
* "randomInts": [23, 87, 45, 12, 78, 34, 56, 90, 11, 67]
* }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Multimodal</strong></summary>
*
* ```typescript
* import { HumanMessage } from '@langchain/core/messages';
*
* const imageUrl = "https://example.com/image.jpg";
* const imageData = await fetch(imageUrl).then(res => res.arrayBuffer());
* const base64Image = Buffer.from(imageData).toString('base64');
*
* const message = new HumanMessage({
*   content: [
*     { type: "text", text: "describe the weather in this image" },
*     {
*       type: "image_url",
*       image_url: { url: `data:image/jpeg;base64,${base64Image}` },
*     },
*   ]
* });
*
* const imageDescriptionAiMsg = await llm.invoke([message]);
* console.log(imageDescriptionAiMsg.content);
* ```
*
* ```txt
* The weather in the image appears to be clear and sunny. The sky is mostly blue with a few scattered white clouds, indicating fair weather. The bright sunlight is casting shadows on the green, grassy hill, suggesting it is a pleasant day with good visibility. There are no signs of rain or stormy conditions.
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Usage Metadata</strong></summary>
*
* ```typescript
* const aiMsgForMetadata = await llm.invoke(input);
* console.log(aiMsgForMetadata.usage_metadata);
* ```
*
* ```txt
* { input_tokens: 28, output_tokens: 5, total_tokens: 33 }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Logprobs</strong></summary>
*
* ```typescript
* const logprobsLlm = new ChatOpenAI({ logprobs: true });
* const aiMsgForLogprobs = await logprobsLlm.invoke(input);
* console.log(aiMsgForLogprobs.response_metadata.logprobs);
* ```
*
* ```txt
* {
*   content: [
*     {
*       token: 'J',
*       logprob: -0.000050616763,
*       bytes: [Array],
*       top_logprobs: []
*     },
*     {
*       token: "'",
*       logprob: -0.01868736,
*       bytes: [Array],
*       top_logprobs: []
*     },
*     {
*       token: 'ad',
*       logprob: -0.0000030545007,
*       bytes: [Array],
*       top_logprobs: []
*     },
*     { token: 'ore', logprob: 0, bytes: [Array], top_logprobs: [] },
*     {
*       token: ' la',
*       logprob: -0.515404,
*       bytes: [Array],
*       top_logprobs: []
*     },
*     {
*       token: ' programm',
*       logprob: -0.0000118755715,
*       bytes: [Array],
*       top_logprobs: []
*     },
*     { token: 'ation', logprob: 0, bytes: [Array], top_logprobs: [] },
*     {
*       token: '.',
*       logprob: -0.0000037697225,
*       bytes: [Array],
*       top_logprobs: []
*     }
*   ],
*   refusal: null
* }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>Response Metadata</strong></summary>
*
* ```typescript
* const aiMsgForResponseMetadata = await llm.invoke(input);
* console.log(aiMsgForResponseMetadata.response_metadata);
* ```
*
* ```txt
* {
*   tokenUsage: { completionTokens: 5, promptTokens: 28, totalTokens: 33 },
*   finish_reason: 'stop',
*   system_fingerprint: 'fp_3aa7262c27'
* }
* ```
* </details>
*
* <br />
*
* <details>
* <summary><strong>JSON Schema Structured Output</strong></summary>
*
* ```typescript
* const llmForJsonSchema = new ChatOpenAI({
* model: "gpt-4o-2024-08-06",
* }).withStructuredOutput(
* z.object({
* command: z.string().describe("The command to execute"),
* expectedOutput: z.string().describe("The expected output of the command"),
* options: z
* .array(z.string())
* .describe("The options you can pass to the command"),
* }),
* {
* method: "jsonSchema",
* strict: true, // Optional when using the `jsonSchema` method
* }
* );
*
* const jsonSchemaRes = await llmForJsonSchema.invoke(
* "What is the command to list files in a directory?"
* );
* console.log(jsonSchemaRes);
* ```
*
* ```txt
* {
*   command: 'ls',
*   expectedOutput: 'A list of files and subdirectories within the specified directory.',
*   options: [
*     '-a: include directory entries whose names begin with a dot (.).',
*     '-l: use a long listing format.',
*     '-h: with -l, print sizes in human readable format (e.g., 1K, 234M, 2G).',
*     '-t: sort by time, newest first.',
*     '-r: reverse order while sorting.',
*     '-S: sort by file size, largest first.',
*     '-R: list subdirectories recursively.'
*   ]
* }
* ```
* </details>
*
* <br />
*/
export declare class ChatOpenAI<CallOptions extends ChatOpenAICallOptions = ChatOpenAICallOptions> extends BaseChatModel<CallOptions, AIMessageChunk> implements OpenAIChatInput, AzureOpenAIInput {
    static lc_name(): string;
    get callKeys(): string[];
    lc_serializable: boolean;
    get lc_secrets(): {
        [key: string]: string;
    } | undefined;
    get lc_aliases(): Record<string, string>;
    temperature: number;
    topP: number;
    frequencyPenalty: number;
    presencePenalty: number;
    n: number;
    logitBias?: Record<string, number>;
    modelName: string;
    model: string;
    modelKwargs?: OpenAIChatInput["modelKwargs"];
    stop?: string[];
    stopSequences?: string[];
    user?: string;
    timeout?: number;
    streaming: boolean;
    streamUsage: boolean;
    maxTokens?: number;
    logprobs?: boolean;
    topLogprobs?: number;
    openAIApiKey?: string;
    apiKey?: string;
    azureOpenAIApiVersion?: string;
    azureOpenAIApiKey?: string;
    azureADTokenProvider?: () => Promise<string>;
    azureOpenAIApiInstanceName?: string;
    azureOpenAIApiDeploymentName?: string;
    azureOpenAIBasePath?: string;
    azureOpenAIEndpoint?: string;
    organization?: string;
    __includeRawResponse?: boolean;
    protected client: OpenAIClient;
    protected clientConfig: ClientOptions;
    /**
     * Whether the model supports the `strict` argument when passing in tools.
     * If `undefined`, the `strict` argument will not be passed to OpenAI.
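     *
     * A minimal sketch (the flag is a public property, so it can be set on an
     * instance after construction):
     *
     * ```typescript
     * const llm = new ChatOpenAI({ model: "gpt-4o" });
     * llm.supportsStrictToolCalling = true; // pass `strict` through to OpenAI
     * ```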
     */
    supportsStrictToolCalling?: boolean;
    constructor(fields?: ChatOpenAIFields,
    /** @deprecated */
    configuration?: ClientOptions & LegacyOpenAIInput);
    getLsParams(options: this["ParsedCallOptions"]): LangSmithParams;
    bindTools(tools: ChatOpenAIToolType[], kwargs?: Partial<CallOptions>): Runnable<BaseLanguageModelInput, AIMessageChunk, CallOptions>;
    private createResponseFormat;
    /**
     * Get the parameters used to invoke the model.
     */
    invocationParams(options?: this["ParsedCallOptions"], extra?: {
        streaming?: boolean;
    }): Omit<OpenAIClient.Chat.ChatCompletionCreateParams, "messages">;
    /** @ignore */
    _identifyingParams(): Omit<OpenAIClient.Chat.ChatCompletionCreateParams, "messages"> & {
        model_name: string;
    } & ClientOptions;
    _streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
    /**
     * Get the identifying parameters for the model.
     */
    identifyingParams(): Omit<OpenAIClient.Chat.Completions.ChatCompletionCreateParams, "messages"> & {
        model_name: string;
    } & ClientOptions;
    /** @ignore */
    _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
    /**
     * Estimate the number of tokens a prompt will use.
     * Modified from: https://github.com/hmarr/openai-chat-tokens/blob/main/src/index.ts
     */
    private getEstimatedTokenCountFromPrompt;
    /**
     * Estimate the number of tokens an array of generations has used.
     */
    private getNumTokensFromGenerations;
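    /**
     * Estimate how many tokens an array of messages will use, both in total
     * and per message. A minimal usage sketch (assumes an existing
     * `ChatOpenAI` instance named `llm` and a `messages` array of
     * `BaseMessage`s):
     *
     * ```typescript
     * const { totalCount, countPerMessage } = await llm.getNumTokensFromMessages(messages);
     * console.log(totalCount); // estimated prompt token total
     * ```
     */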
    getNumTokensFromMessages(messages: BaseMessage[]): Promise<{
        totalCount: number;
        countPerMessage: number[];
    }>;
    /**
     * Calls the OpenAI API with retry logic in case of failures.
     * @param request The request to send to the OpenAI API.
     * @param options Optional configuration for the API call.
     * @returns The response from the OpenAI API.
     */
    completionWithRetry(request: OpenAIClient.Chat.ChatCompletionCreateParamsStreaming, options?: OpenAICoreRequestOptions): Promise<AsyncIterable<OpenAIClient.Chat.Completions.ChatCompletionChunk>>;
    completionWithRetry(request: OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming, options?: OpenAICoreRequestOptions): Promise<OpenAIClient.Chat.Completions.ChatCompletion>;
    /**
     * Call the beta chat completions parse endpoint. This should only be called if
     * response_format is set to "json_schema".
     * @param {OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming} request
     * @param {OpenAICoreRequestOptions | undefined} options
     */
    betaParsedCompletionWithRetry(request: OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming, options?: OpenAICoreRequestOptions): Promise<ReturnType<OpenAIClient["beta"]["chat"]["completions"]["parse"]>>;
    protected _getClientOptions(options: OpenAICoreRequestOptions | undefined): OpenAICoreRequestOptions;
    _llmType(): string;
    /** @ignore */
    _combineLLMOutput(...llmOutputs: OpenAILLMOutput[]): OpenAILLMOutput;
    withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: StructuredOutputMethodParams<RunOutput, false> | z.ZodType<RunOutput> | Record<string, any>, config?: ChatOpenAIStructuredOutputMethodOptions<false>): Runnable<BaseLanguageModelInput, RunOutput>;
    withStructuredOutput<RunOutput extends Record<string, any> = Record<string, any>>(outputSchema: StructuredOutputMethodParams<RunOutput, true> | z.ZodType<RunOutput> | Record<string, any>, config?: ChatOpenAIStructuredOutputMethodOptions<true>): Runnable<BaseLanguageModelInput, {
        raw: BaseMessage;
        parsed: RunOutput;
    }>;
}