import { BaseLanguageModel, BaseLanguageModelInterface, BaseLanguageModelInput } from "@langchain/core/language_models/base";
import type { ChainValues } from "@langchain/core/utils/types";
import type { Generation } from "@langchain/core/outputs";
import type { BaseMessage } from "@langchain/core/messages";
import type { BasePromptValueInterface } from "@langchain/core/prompt_values";
import { BasePromptTemplate } from "@langchain/core/prompts";
import { BaseLLMOutputParser } from "@langchain/core/output_parsers";
import { CallbackManager, BaseCallbackConfig, CallbackManagerForChainRun, Callbacks } from "@langchain/core/callbacks/manager";
import { Runnable } from "@langchain/core/runnables";
import { BaseChain, ChainInputs } from "./base.js";
import { SerializedLLMChain } from "./serde.js";
/** Models accepted as `llm`: a language model wrapper, or any Runnable mapping prompt input to a string or chat message. */
type LLMType = BaseLanguageModelInterface | Runnable<BaseLanguageModelInput, string> | Runnable<BaseLanguageModelInput, BaseMessage>;
/** Resolves to the model's declared `CallOptions` type when it has one, otherwise `any`. */
type CallOptionsIfAvailable<T> = T extends {
    CallOptions: infer CO;
} ? CO : any;
/**
 * Interface for the input parameters of the LLMChain class.
 */
export interface LLMChainInput<T extends string | object = string, Model extends LLMType = LLMType> extends ChainInputs {
    /** Prompt object to use */
    prompt: BasePromptTemplate;
    /** LLM Wrapper to use */
    llm: Model;
    /** Kwargs to pass to LLM */
    llmKwargs?: CallOptionsIfAvailable<Model>;
    /** OutputParser to use */
    outputParser?: BaseLLMOutputParser<T>;
    /** Key to use for output, defaults to `text` */
    outputKey?: string;
}
/**
 * @deprecated This class will be removed in 1.0.0. Use the LangChain Expression Language (LCEL) instead.
 * See the example below for how to replace an LLMChain with LCEL:
 *
 * Chain to run queries against LLMs.
 *
 * @example
 * ```ts
 * import { ChatPromptTemplate } from "@langchain/core/prompts";
 * import { ChatOpenAI } from "@langchain/openai";
 *
 * const prompt = ChatPromptTemplate.fromTemplate("Tell me a {adjective} joke");
 * const llm = new ChatOpenAI();
 * const chain = prompt.pipe(llm);
 *
 * const response = await chain.invoke({ adjective: "funny" });
 * ```
 */
export declare class LLMChain<T extends string | object = string, Model extends LLMType = LLMType> extends BaseChain implements LLMChainInput<T> {
    static lc_name(): string;
    lc_serializable: boolean;
    prompt: BasePromptTemplate;
    llm: Model;
    llmKwargs?: CallOptionsIfAvailable<Model>;
    outputKey: string;
    outputParser?: BaseLLMOutputParser<T>;
    get inputKeys(): string[];
    get outputKeys(): string[];
    constructor(fields: LLMChainInput<T, Model>);
    private getCallKeys;
    /** @ignore */
    _selectMemoryInputs(values: ChainValues): ChainValues;
    /** @ignore */
    _getFinalOutput(generations: Generation[], promptValue: BasePromptValueInterface, runManager?: CallbackManagerForChainRun): Promise<unknown>;
    /**
     * Run the core logic of this chain and add to output if desired.
     *
     * Wraps _call and handles memory.
     */
    call(values: ChainValues & CallOptionsIfAvailable<Model>, config?: Callbacks | BaseCallbackConfig): Promise<ChainValues>;
    /** @ignore */
    _call(values: ChainValues & CallOptionsIfAvailable<Model>, runManager?: CallbackManagerForChainRun): Promise<ChainValues>;
    /**
     * Format the prompt with values and pass it to the LLM.
     *
     * @param values - keys to pass to prompt template
     * @param callbackManager - CallbackManager to use
     * @returns Completion from LLM.
     *
     * @example
     * ```ts
     * chain.predict({ adjective: "funny" })
     * ```
     */
    predict(values: ChainValues & CallOptionsIfAvailable<Model>, callbackManager?: CallbackManager): Promise<T>;
    _chainType(): "llm";
    static deserialize(data: SerializedLLMChain): Promise<LLMChain<string, BaseLanguageModel<any, import("@langchain/core/language_models/base").BaseLanguageModelCallOptions>>>;
    /** @deprecated */
    serialize(): SerializedLLMChain;
    _getNumTokens(text: string): Promise<number>;
}
export {};
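// ---------------------------------------------------------------------------
// Usage sketch (illustrative comment only; a .d.ts cannot contain statements).
// A minimal example of driving the deprecated LLMChain API declared above,
// assuming LLMChain is re-exported from "langchain/chains" and that
// @langchain/openai is installed; any model satisfying LLMType works.
//
//   import { LLMChain } from "langchain/chains";
//   import { PromptTemplate } from "@langchain/core/prompts";
//   import { ChatOpenAI } from "@langchain/openai";
//
//   const prompt = PromptTemplate.fromTemplate("Tell me a {adjective} joke");
//   const chain = new LLMChain({ llm: new ChatOpenAI(), prompt });
//
//   // `call` resolves to a ChainValues record keyed by `outputKey`
//   // ("text" by default); `predict` unwraps that key and resolves to T.
//   const values = await chain.call({ adjective: "funny" });
//   const joke = await chain.predict({ adjective: "funny" });
// ---------------------------------------------------------------------------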