import { BaseLanguageModelInput, ToolDefinition } from "@langchain/core/language_models/base";
import { BaseChatModel, BaseChatModelParams, BindToolsInput, type BaseChatModelCallOptions } from "@langchain/core/language_models/chat_models";
import { BaseMessage, type AIMessageChunk } from "@langchain/core/messages";
import { type RunnableBatchOptions, RunnableBinding, type RunnableConfig, type RunnableToolLike } from "@langchain/core/runnables";
import { IterableReadableStream } from "@langchain/core/utils/stream";
import { type LogStreamCallbackHandlerInput, type RunLogPatch, type StreamEvent } from "@langchain/core/tracers/log_stream";
import { type StructuredToolInterface } from "@langchain/core/tools";
import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
import { ChatResult } from "@langchain/core/outputs";
interface EventStreamCallbackHandlerInput extends Omit<LogStreamCallbackHandlerInput, "_schemaFormat"> {
}
declare const _SUPPORTED_PROVIDERS: readonly ["openai", "anthropic", "azure_openai", "cohere", "google-vertexai", "google-genai", "ollama", "together", "fireworks", "mistralai", "groq", "bedrock"];
export type ChatModelProvider = (typeof _SUPPORTED_PROVIDERS)[number];
export interface ConfigurableChatModelCallOptions extends BaseChatModelCallOptions {
    tools?: (StructuredToolInterface | Record<string, any> | ToolDefinition | RunnableToolLike)[];
}
/**
 * Attempts to infer the model provider based on the given model name.
 *
 * @param {string} modelName - The name of the model to infer the provider for.
 * @returns {string | undefined} The inferred model provider name, or undefined if unable to infer.
 *
 * @example
 * _inferModelProvider("gpt-4"); // returns "openai"
 * _inferModelProvider("claude-2"); // returns "anthropic"
 * _inferModelProvider("unknown-model"); // returns undefined
 */
export declare function _inferModelProvider(modelName: string): string | undefined;
interface ConfigurableModelFields extends BaseChatModelParams {
    defaultConfig?: Record<string, any>;
    /**
     * @default "any"
     */
    configurableFields?: string[] | "any";
    /**
     * @default ""
     */
    configPrefix?: string;
    /**
     * Methods which should be called after the model is initialized.
     * The key will be the method name, and the value will be the arguments.
     */
    queuedMethodOperations?: Record<string, any>;
}
declare class _ConfigurableModel<RunInput extends BaseLanguageModelInput = BaseLanguageModelInput, CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions> extends BaseChatModel<CallOptions, AIMessageChunk> {
    _llmType(): string;
    lc_namespace: string[];
    _defaultConfig?: Record<string, any>;
    /**
     * @default "any"
     */
    _configurableFields: string[] | "any";
    /**
     * @default ""
     */
    _configPrefix: string;
    /**
     * Methods which should be called after the model is initialized.
     * The key will be the method name, and the value will be the arguments.
     */
    _queuedMethodOperations: Record<string, any>;
    constructor(fields: ConfigurableModelFields);
    _model(config?: RunnableConfig): Promise<BaseChatModel>;
    _generate(messages: BaseMessage[], options?: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
    bindTools(tools: BindToolsInput[], params?: Record<string, any>): _ConfigurableModel<RunInput, CallOptions>;
    withStructuredOutput: BaseChatModel["withStructuredOutput"];
    _modelParams(config?: RunnableConfig): Record<string, any>;
    _removePrefix(str: string, prefix: string): string;
    /**
     * Bind config to a Runnable, returning a new Runnable.
     * @param {RunnableConfig | undefined} [config] - The config to bind.
     * @returns {RunnableBinding<RunInput, AIMessageChunk, CallOptions>} A new RunnableBinding with the bound config.
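     *
     * @example
     * // Non-normative sketch: binding a concrete model name onto a
     * // runtime-configurable model created by `initChatModel()` with no fixed
     * // model. Assumes the matching provider package (e.g. @langchain/openai)
     * // is installed, since it is imported dynamically at call time.
     * const configurableModel = await initChatModel(undefined, { temperature: 0 });
     * const gpt4Model = configurableModel.withConfig({
     *   configurable: { model: "gpt-4" },
     * });
     * await gpt4Model.invoke("what's your name");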
     */
    withConfig(config?: RunnableConfig): RunnableBinding<RunInput, AIMessageChunk, CallOptions>;
    invoke(input: RunInput, options?: CallOptions): Promise<AIMessageChunk>;
    stream(input: RunInput, options?: CallOptions): Promise<IterableReadableStream<AIMessageChunk>>;
    batch(inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions & {
        returnExceptions?: false;
    }): Promise<AIMessageChunk[]>;
    batch(inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions & {
        returnExceptions: true;
    }): Promise<(AIMessageChunk | Error)[]>;
    batch(inputs: RunInput[], options?: Partial<CallOptions> | Partial<CallOptions>[], batchOptions?: RunnableBatchOptions): Promise<(AIMessageChunk | Error)[]>;
    transform(generator: AsyncGenerator<RunInput>, options: CallOptions): AsyncGenerator<AIMessageChunk>;
    streamLog(input: RunInput, options?: Partial<CallOptions>, streamOptions?: Omit<LogStreamCallbackHandlerInput, "autoClose">): AsyncGenerator<RunLogPatch>;
    streamEvents(input: RunInput, options: Partial<CallOptions> & {
        version: "v1" | "v2";
    }, streamOptions?: Omit<EventStreamCallbackHandlerInput, "autoClose">): IterableReadableStream<StreamEvent>;
    streamEvents(input: RunInput, options: Partial<CallOptions> & {
        version: "v1" | "v2";
        encoding: "text/event-stream";
    }, streamOptions?: Omit<EventStreamCallbackHandlerInput, "autoClose">): IterableReadableStream<Uint8Array>;
}
export interface InitChatModelFields extends Partial<Record<string, any>> {
    modelProvider?: string;
    configurableFields?: string[] | "any";
    configPrefix?: string;
}
export type ConfigurableFields = "any" | string[];
export declare function initChatModel<RunInput extends BaseLanguageModelInput = BaseLanguageModelInput, CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions>(model: string, fields?: Partial<Record<string, any>> & {
    modelProvider?: string;
    configurableFields?: never;
    configPrefix?: string;
}): Promise<_ConfigurableModel<RunInput, CallOptions>>;
export declare function initChatModel<RunInput extends BaseLanguageModelInput = BaseLanguageModelInput, CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions>(model: never, options?: Partial<Record<string, any>> & {
    modelProvider?: string;
    configurableFields?: never;
    configPrefix?: string;
}): Promise<_ConfigurableModel<RunInput, CallOptions>>;
export declare function initChatModel<RunInput extends BaseLanguageModelInput = BaseLanguageModelInput, CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions>(model?: string, options?: Partial<Record<string, any>> & {
    modelProvider?: string;
    configurableFields?: ConfigurableFields;
    configPrefix?: string;
}): Promise<_ConfigurableModel<RunInput, CallOptions>>;
export {};
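// Usage sketch (non-normative): `initChatModel` infers the provider from the
// model name (or takes it explicitly via `modelProvider`) and dynamically
// imports the matching integration package, which must therefore be installed.
// The snippet below assumes @langchain/openai and @langchain/anthropic.
//
//   import { initChatModel } from "langchain/chat_models/universal";
//
//   // Fixed model: provider inferred as "openai" from the model name.
//   const gpt4 = await initChatModel("gpt-4o", { temperature: 0 });
//   await gpt4.invoke("what's your name");
//
//   // Runtime-configurable model: no model fixed at init time; pick one
//   // per call through the `configurable` field of the call options.
//   const configurable = await initChatModel(undefined, { temperature: 0 });
//   await configurable.invoke("what's your name", {
//     configurable: { model: "claude-3-5-sonnet-20240620" },
//   });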