agsamantha/node_modules/langchain/dist/agents/openai_tools/output_parser.cjs

"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
exports.OpenAIToolsAgentOutputParser = void 0;
const messages_1 = require("@langchain/core/messages");
const output_parsers_1 = require("@langchain/core/output_parsers");
const types_js_1 = require("../types.cjs");
/**
 * @example
 * ```typescript
 * const prompt = ChatPromptTemplate.fromMessages([
 *   ["ai", "You are a helpful assistant"],
 *   ["human", "{input}"],
 *   new MessagesPlaceholder("agent_scratchpad"),
 * ]);
 *
 * const runnableAgent = RunnableSequence.from([
 *   {
 *     input: (i: { input: string; steps: ToolsAgentStep[] }) => i.input,
 *     agent_scratchpad: (i: { input: string; steps: ToolsAgentStep[] }) =>
 *       formatToOpenAIToolMessages(i.steps),
 *   },
 *   prompt,
 *   new ChatOpenAI({
 *     modelName: "gpt-3.5-turbo-1106",
 *     temperature: 0,
 *   }).bind({ tools: tools.map((tool) => convertToOpenAITool(tool)) }),
 *   new OpenAIToolsAgentOutputParser(),
 * ]).withConfig({ runName: "OpenAIToolsAgent" });
 *
 * const result = await runnableAgent.invoke({
 *   input:
 *     "What is the sum of the current temperature in San Francisco, New York, and Tokyo?",
 * });
 * ```
 */
class OpenAIToolsAgentOutputParser extends types_js_1.AgentMultiActionOutputParser {
constructor() {
super(...arguments);
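// lc_namespace locates this class within LangChain's serialization namespace.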
Object.defineProperty(this, "lc_namespace", {
enumerable: true,
configurable: true,
writable: true,
value: ["langchain", "agents", "openai"]
});
}
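// Name used when this parser is serialized or referenced by LangChain.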
static lc_name() {
return "OpenAIToolsAgentOutputParser";
}
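/**
 * Not supported for this parser: it can only operate on chat messages,
 * so calling parse with plain text always throws.
 * @param text The raw text output (unused).
 */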
async parse(text) {
throw new Error(`OpenAIToolsAgentOutputParser can only parse messages.\nPassed input: ${text}`);
}
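/**
 * Parses the chat model's generations. Expects the first generation to be a
 * ChatGeneration carrying a BaseMessage; otherwise an error is thrown.
 * @param generations The generations returned by the chat model.
 * @returns A ToolsAgentAction[] or AgentFinish object from parseAIMessage.
 */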
async parseResult(generations) {
if ("message" in generations[0] && (0, messages_1.isBaseMessage)(generations[0].message)) {
return this.parseAIMessage(generations[0].message);
}
throw new Error("parseResult on OpenAIFunctionsAgentOutputParser only works on ChatGeneration output");
}
/**
* Parses the output message into a ToolsAgentAction[] or AgentFinish
* object.
* @param message The BaseMessage to parse.
* @returns A ToolsAgentAction[] or AgentFinish object.
*/
parseAIMessage(message) {
if (message.content && typeof message.content !== "string") {
throw new Error("This agent cannot parse non-string model responses.");
}
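// OpenAI reports tool calls in additional_kwargs rather than in the message content.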
if (message.additional_kwargs.tool_calls) {
const toolCalls = message.additional_kwargs.tool_calls;
try {
return toolCalls.map((toolCall, i) => {
const toolInput = toolCall.function.arguments
? JSON.parse(toolCall.function.arguments)
: {};
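// Only the first action carries the originating message in its messageLog.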
const messageLog = i === 0 ? [message] : [];
return {
tool: toolCall.function.name,
toolInput,
toolCallId: toolCall.id,
log: `Invoking "${toolCall.function.name}" with ${toolCall.function.arguments ?? "{}"}\n${message.content}`,
messageLog,
};
});
}
catch (error) {
throw new output_parsers_1.OutputParserException(`Failed to parse tool arguments from chat model response. Text: "${JSON.stringify(toolCalls)}". ${error}`);
}
}
else {
return {
returnValues: { output: message.content },
log: message.content,
};
}
}
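/**
 * Format instructions are not used by this parser; tool schemas are bound
 * directly to the model (see the class example), so this always throws.
 */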
getFormatInstructions() {
throw new Error("getFormatInstructions not implemented inside OpenAIToolsAgentOutputParser.");
}
}
exports.OpenAIToolsAgentOutputParser = OpenAIToolsAgentOutputParser;
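/*
 * Illustrative sketch (not part of the compiled module): how parseResult behaves
 * when the model replies with an OpenAI tool call. The tool name, call id, and
 * arguments below are assumed values for demonstration only.
 *
 * const { AIMessage } = require("@langchain/core/messages");
 *
 * const parser = new OpenAIToolsAgentOutputParser();
 * const actions = await parser.parseResult([
 *   {
 *     text: "",
 *     message: new AIMessage({
 *       content: "",
 *       additional_kwargs: {
 *         tool_calls: [
 *           {
 *             id: "call_abc123",
 *             type: "function",
 *             function: { name: "get_current_weather", arguments: '{"city":"Tokyo"}' },
 *           },
 *         ],
 *       },
 *     }),
 *   },
 * ]);
 * // actions -> [{ tool: "get_current_weather", toolInput: { city: "Tokyo" },
 * //              toolCallId: "call_abc123", log: "...", messageLog: [message] }]
 *
 * A message without tool_calls instead yields an AgentFinish-style object:
 * // { returnValues: { output: message.content }, log: message.content }
 */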