import { BaseChatModel, } from "@langchain/core/language_models/chat_models";
import { RunnableBinding, ensureConfig, } from "@langchain/core/runnables";
import { AsyncGeneratorWithSetup, IterableReadableStream, } from "@langchain/core/utils/stream";
const _SUPPORTED_PROVIDERS = [
    "openai",
    "anthropic",
    "azure_openai",
    "cohere",
    "google-vertexai",
    "google-genai",
    "ollama",
    "together",
    "fireworks",
    "mistralai",
    "groq",
    "bedrock",
];
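/**
 * Resolves the provider for `model`, dynamically imports the matching
 * integration package, and instantiates its chat model class.
 *
 * Illustrative sketch (internal helper, not a public API):
 * `_initChatModelHelper("gpt-4o", undefined, { temperature: 0 })` infers the
 * "openai" provider from the "gpt-4" prefix and returns
 * `new ChatOpenAI({ model: "gpt-4o", temperature: 0 })`, assuming
 * `@langchain/openai` is installed.
 */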
async function _initChatModelHelper(model, modelProvider, 
// eslint-disable-next-line @typescript-eslint/no-explicit-any
params = {}) {
    const modelProviderCopy = modelProvider || _inferModelProvider(model);
    if (!modelProviderCopy) {
        throw new Error(`Unable to infer model provider for { model: ${model} }, please specify modelProvider directly.`);
    }
    try {
        switch (modelProviderCopy) {
            case "openai": {
                const { ChatOpenAI } = await import("@langchain/openai");
                return new ChatOpenAI({ model, ...params });
            }
            case "anthropic": {
                const { ChatAnthropic } = await import("@langchain/anthropic");
                return new ChatAnthropic({ model, ...params });
            }
            case "azure_openai": {
                const { AzureChatOpenAI } = await import("@langchain/openai");
                return new AzureChatOpenAI({ model, ...params });
            }
            case "cohere": {
                const { ChatCohere } = await import("@langchain/cohere");
                return new ChatCohere({ model, ...params });
            }
            case "google-vertexai": {
                const { ChatVertexAI } = await import("@langchain/google-vertexai");
                return new ChatVertexAI({ model, ...params });
            }
            case "google-genai": {
                const { ChatGoogleGenerativeAI } = await import("@langchain/google-genai");
                return new ChatGoogleGenerativeAI({ model, ...params });
            }
            case "ollama": {
                const { ChatOllama } = await import("@langchain/ollama");
                return new ChatOllama({ model, ...params });
            }
            case "mistralai": {
                const { ChatMistralAI } = await import("@langchain/mistralai");
                return new ChatMistralAI({ model, ...params });
            }
            case "groq": {
                const { ChatGroq } = await import("@langchain/groq");
                return new ChatGroq({ model, ...params });
            }
            case "bedrock": {
                const { ChatBedrockConverse } = await import("@langchain/aws");
                return new ChatBedrockConverse({ model, ...params });
            }
case "fireworks": {
|
|
const { ChatFireworks } = await import(
|
|
// We can not 'expect-error' because if you explicitly build `@langchain/community`
|
|
// this import will be able to be resolved, thus there will be no error. However
|
|
// this will never be the case in CI.
|
|
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
|
|
// @ts-ignore - Can not install as a proper dependency due to circular dependency
|
|
"@langchain/community/chat_models/fireworks");
|
|
return new ChatFireworks({ model, ...params });
|
|
}
|
|
case "together": {
|
|
const { ChatTogetherAI } = await import(
|
|
// We can not 'expect-error' because if you explicitly build `@langchain/community`
|
|
// this import will be able to be resolved, thus there will be no error. However
|
|
// this will never be the case in CI.
|
|
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
|
|
// @ts-ignore - Can not install as a proper dependency due to circular dependency
|
|
"@langchain/community/chat_models/togetherai");
|
|
return new ChatTogetherAI({ model, ...params });
|
|
}
|
|
            default: {
                const supported = _SUPPORTED_PROVIDERS.join(", ");
                throw new Error(`Unsupported { modelProvider: ${modelProviderCopy} }.\n\nSupported model providers are: ${supported}`);
            }
        }
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
    }
    catch (e) {
        if ("code" in e && e.code.includes("ERR_MODULE_NOT_FOUND")) {
            const attemptedPackage = new Error(e).message
                .split("Error: Cannot find package '")[1]
                .split("'")[0];
            throw new Error(`Unable to import ${attemptedPackage}. Please install with ` +
                `\`npm install ${attemptedPackage}\` or \`yarn add ${attemptedPackage}\``);
        }
        throw e;
    }
}
/**
 * Attempts to infer the model provider based on the given model name.
 *
 * @param {string} modelName - The name of the model to infer the provider for.
 * @returns {string | undefined} The inferred model provider name, or undefined if unable to infer.
 *
 * @example
 * _inferModelProvider("gpt-4"); // returns "openai"
 * _inferModelProvider("claude-2"); // returns "anthropic"
 * _inferModelProvider("unknown-model"); // returns undefined
 */
export function _inferModelProvider(modelName) {
    if (modelName.startsWith("gpt-3") || modelName.startsWith("gpt-4")) {
        return "openai";
    }
    else if (modelName.startsWith("claude")) {
        return "anthropic";
    }
    else if (modelName.startsWith("command")) {
        return "cohere";
    }
    else if (modelName.startsWith("accounts/fireworks")) {
        return "fireworks";
    }
    else if (modelName.startsWith("gemini")) {
        return "google-vertexai";
    }
    else if (modelName.startsWith("amazon.")) {
        return "bedrock";
    }
    else {
        return undefined;
    }
}
class _ConfigurableModel extends BaseChatModel {
    _llmType() {
        return "chat_model";
    }
    constructor(fields) {
        super(fields);
        Object.defineProperty(this, "lc_namespace", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: ["langchain", "chat_models"]
        });
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        Object.defineProperty(this, "_defaultConfig", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: {}
        });
        /**
         * @default "any"
         */
        Object.defineProperty(this, "_configurableFields", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: "any"
        });
        /**
         * @default ""
         */
        Object.defineProperty(this, "_configPrefix", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: void 0
        });
        /**
         * Methods which should be called after the model is initialized.
         * The key will be the method name, and the value will be the arguments.
         */
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        Object.defineProperty(this, "_queuedMethodOperations", {
            enumerable: true,
            configurable: true,
            writable: true,
            value: {}
        });
        // Extract the input types from the `BaseModel` class.
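        // Note: `withStructuredOutput` is not applied immediately. The call is queued
        // in `_queuedMethodOperations` and replayed in `_model()` once the concrete
        // provider model has been constructed.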
Object.defineProperty(this, "withStructuredOutput", {
|
|
enumerable: true,
|
|
configurable: true,
|
|
writable: true,
|
|
value: (schema, ...args) => {
|
|
this._queuedMethodOperations.withStructuredOutput = [schema, ...args];
|
|
return new _ConfigurableModel({
|
|
defaultConfig: this._defaultConfig,
|
|
configurableFields: this._configurableFields,
|
|
configPrefix: this._configPrefix,
|
|
queuedMethodOperations: this._queuedMethodOperations,
|
|
});
|
|
}
|
|
});
|
|
this._defaultConfig = fields.defaultConfig ?? {};
|
|
if (fields.configurableFields === "any") {
|
|
this._configurableFields = "any";
|
|
}
|
|
else {
|
|
this._configurableFields = fields.configurableFields ?? "any";
|
|
}
|
|
if (fields.configPrefix) {
|
|
this._configPrefix = fields.configPrefix.endsWith("_")
|
|
? fields.configPrefix
|
|
: `${fields.configPrefix}_`;
|
|
}
|
|
else {
|
|
this._configPrefix = "";
|
|
}
|
|
this._queuedMethodOperations =
|
|
fields.queuedMethodOperations ?? this._queuedMethodOperations;
|
|
}
|
|
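    /**
     * Builds the underlying provider model from the default config merged with any
     * config-scoped params, then replays queued operations (e.g. `bindTools`,
     * `withStructuredOutput`) on the freshly initialized model.
     */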
    async _model(config) {
        const params = { ...this._defaultConfig, ...this._modelParams(config) };
        let initializedModel = await _initChatModelHelper(params.model, params.modelProvider, params);
        // Apply queued method operations
        const queuedMethodOperationsEntries = Object.entries(this._queuedMethodOperations);
        if (queuedMethodOperationsEntries.length > 0) {
            for (const [method, args] of queuedMethodOperationsEntries) {
                if (method in initializedModel &&
                    // eslint-disable-next-line @typescript-eslint/no-explicit-any
                    typeof initializedModel[method] === "function") {
                    // eslint-disable-next-line @typescript-eslint/no-explicit-any
                    initializedModel = await initializedModel[method](...args);
                }
            }
        }
        return initializedModel;
    }
    async _generate(messages, options, runManager) {
        const model = await this._model(options);
        return model._generate(messages, options ?? {}, runManager);
    }
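    // Like `withStructuredOutput`, `bindTools` is queued here and only applied
    // once the concrete model is created in `_model()`.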
    bindTools(tools, 
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    params) {
        this._queuedMethodOperations.bindTools = [tools, params];
        return new _ConfigurableModel({
            defaultConfig: this._defaultConfig,
            configurableFields: this._configurableFields,
            configPrefix: this._configPrefix,
            queuedMethodOperations: this._queuedMethodOperations,
        });
    }
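    /**
     * Extracts model params from `config.configurable`, stripping `_configPrefix`
     * and (unless `_configurableFields` is "any") dropping keys that are not
     * declared configurable. For example, with a prefix of "foo_", a configurable
     * value of `{ foo_model: "gpt-4" }` yields `{ model: "gpt-4" }`.
     */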
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    _modelParams(config) {
        const configurable = config?.configurable ?? {};
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        let modelParams = {};
        for (const [key, value] of Object.entries(configurable)) {
            if (key.startsWith(this._configPrefix)) {
                const strippedKey = this._removePrefix(key, this._configPrefix);
                modelParams[strippedKey] = value;
            }
        }
        if (this._configurableFields !== "any") {
            modelParams = Object.fromEntries(Object.entries(modelParams).filter(([key]) => this._configurableFields.includes(key)));
        }
        return modelParams;
    }
    _removePrefix(str, prefix) {
        return str.startsWith(prefix) ? str.slice(prefix.length) : str;
    }
    /**
     * Bind config to a Runnable, returning a new Runnable.
     * @param {RunnableConfig | undefined} [config] - The config to bind.
     * @returns {RunnableBinding<RunInput, RunOutput, CallOptions>} A new RunnableBinding with the bound config.
     */
    withConfig(config) {
        const mergedConfig = { ...(config || {}) };
        const modelParams = this._modelParams(mergedConfig);
        const remainingConfig = Object.fromEntries(Object.entries(mergedConfig).filter(([k]) => k !== "configurable"));
        remainingConfig.configurable = Object.fromEntries(Object.entries(mergedConfig.configurable || {}).filter(([k]) => this._configPrefix &&
            !Object.keys(modelParams).includes(this._removePrefix(k, this._configPrefix))));
        const newConfigurableModel = new _ConfigurableModel({
            defaultConfig: { ...this._defaultConfig, ...modelParams },
            configurableFields: Array.isArray(this._configurableFields)
                ? [...this._configurableFields]
                : this._configurableFields,
            configPrefix: this._configPrefix,
        });
        return new RunnableBinding({
            config: mergedConfig,
            bound: newConfigurableModel,
        });
    }
    async invoke(input, options) {
        const model = await this._model(options);
        const config = ensureConfig(options);
        return model.invoke(input, config);
    }
    async stream(input, options) {
        const model = await this._model(options);
        const wrappedGenerator = new AsyncGeneratorWithSetup({
            generator: await model.stream(input, options),
            config: options,
        });
        await wrappedGenerator.setup;
        return IterableReadableStream.fromAsyncGenerator(wrappedGenerator);
    }
    async batch(inputs, options, batchOptions) {
        // We can super this since the base runnable implementation of
        // `.batch` will call `.invoke` on each input.
        return super.batch(inputs, options, batchOptions);
    }
    async *transform(generator, options) {
        const model = await this._model(options);
        const config = ensureConfig(options);
        yield* model.transform(generator, config);
    }
    async *streamLog(input, options, streamOptions) {
        const model = await this._model(options);
        const config = ensureConfig(options);
        yield* model.streamLog(input, config, {
            ...streamOptions,
            _schemaFormat: "original",
            includeNames: streamOptions?.includeNames,
            includeTypes: streamOptions?.includeTypes,
            includeTags: streamOptions?.includeTags,
            excludeNames: streamOptions?.excludeNames,
            excludeTypes: streamOptions?.excludeTypes,
            excludeTags: streamOptions?.excludeTags,
        });
    }
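    // The model is resolved inside the generator, so the provider package import
    // and model construction are deferred until the returned stream is consumed.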
    streamEvents(input, options, streamOptions) {
        // eslint-disable-next-line @typescript-eslint/no-this-alias
        const outerThis = this;
        async function* wrappedGenerator() {
            const model = await outerThis._model(options);
            const config = ensureConfig(options);
            const eventStream = model.streamEvents(input, config, streamOptions);
            for await (const chunk of eventStream) {
                yield chunk;
            }
        }
        return IterableReadableStream.fromAsyncGenerator(wrappedGenerator());
    }
}
// ################################# FOR CONTRIBUTORS #################################
//
// If adding support for a new provider, please append the provider
// name to the supported list in the docstring below.
//
// ####################################################################################
/**
 * Initialize a ChatModel from the model name and provider.
 * Must have the integration package corresponding to the model provider installed.
 *
 * @template {extends BaseLanguageModelInput = BaseLanguageModelInput} RunInput - The input type for the model.
 * @template {extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions} CallOptions - Call options for the model.
 *
 * @param {string | ChatModelProvider} [model] - The name of the model, e.g. "gpt-4", "claude-3-opus-20240229".
 * @param {Object} [fields] - Additional configuration options.
 * @param {string} [fields.modelProvider] - The model provider. Supported values include:
 *   - openai (@langchain/openai)
 *   - anthropic (@langchain/anthropic)
 *   - azure_openai (@langchain/openai)
 *   - google-vertexai (@langchain/google-vertexai)
 *   - google-genai (@langchain/google-genai)
 *   - bedrock (@langchain/aws)
 *   - cohere (@langchain/cohere)
 *   - fireworks (@langchain/community/chat_models/fireworks)
 *   - together (@langchain/community/chat_models/togetherai)
 *   - mistralai (@langchain/mistralai)
 *   - groq (@langchain/groq)
 *   - ollama (@langchain/ollama)
 * @param {string[] | "any"} [fields.configurableFields] - Which model parameters are configurable:
 *   - undefined: No configurable fields.
 *   - "any": All fields are configurable. (See Security Note in description)
 *   - string[]: Specified fields are configurable.
 * @param {string} [fields.configPrefix] - Prefix for configurable fields at runtime.
 * @param {Record<string, any>} [fields.params] - Additional keyword args to pass to the ChatModel constructor.
 * @returns {Promise<_ConfigurableModel<RunInput, CallOptions>>} A Promise resolving to an instance of a class which extends BaseChatModel.
 * @throws {Error} If modelProvider cannot be inferred or isn't supported.
 * @throws {Error} If the model provider integration package is not installed.
 *
 * @example Initialize non-configurable models
 * ```typescript
 * import { initChatModel } from "langchain/chat_models/universal";
 *
 * const gpt4 = await initChatModel("gpt-4", {
 *   modelProvider: "openai",
 *   temperature: 0.25,
 * });
 * const gpt4Result = await gpt4.invoke("what's your name");
 *
 * const claude = await initChatModel("claude-3-opus-20240229", {
 *   modelProvider: "anthropic",
 *   temperature: 0.25,
 * });
 * const claudeResult = await claude.invoke("what's your name");
 *
 * const gemini = await initChatModel("gemini-1.5-pro", {
 *   modelProvider: "google-vertexai",
 *   temperature: 0.25,
 * });
 * const geminiResult = await gemini.invoke("what's your name");
 * ```
 *
 * @example Create a partially configurable model with no default model
 * ```typescript
 * import { initChatModel } from "langchain/chat_models/universal";
 *
 * const configurableModel = await initChatModel(undefined, {
 *   temperature: 0,
 *   configurableFields: ["model", "apiKey"],
 * });
 *
 * const gpt4Result = await configurableModel.invoke("what's your name", {
 *   configurable: {
 *     model: "gpt-4",
 *   },
 * });
 *
 * const claudeResult = await configurableModel.invoke("what's your name", {
 *   configurable: {
 *     model: "claude-3-5-sonnet-20240620",
 *   },
 * });
 * ```
 *
 * @example Create a fully configurable model with a default model and a config prefix
 * ```typescript
 * import { initChatModel } from "langchain/chat_models/universal";
 *
 * const configurableModelWithDefault = await initChatModel("gpt-4", {
 *   modelProvider: "openai",
 *   configurableFields: "any",
 *   configPrefix: "foo",
 *   temperature: 0,
 * });
 *
 * const openaiResult = await configurableModelWithDefault.invoke(
 *   "what's your name",
 *   {
 *     configurable: {
 *       foo_apiKey: process.env.OPENAI_API_KEY,
 *     },
 *   }
 * );
 *
 * const claudeResult = await configurableModelWithDefault.invoke(
 *   "what's your name",
 *   {
 *     configurable: {
 *       foo_model: "claude-3-5-sonnet-20240620",
 *       foo_modelProvider: "anthropic",
 *       foo_temperature: 0.6,
 *       foo_apiKey: process.env.ANTHROPIC_API_KEY,
 *     },
 *   }
 * );
 * ```
 *
 * @example Bind tools to a configurable model:
 * ```typescript
 * import { initChatModel } from "langchain/chat_models/universal";
 * import { z } from "zod";
 * import { tool } from "@langchain/core/tools";
 *
 * const getWeatherTool = tool(
 *   (input) => {
 *     // Do something with the input
 *     return JSON.stringify(input);
 *   },
 *   {
 *     schema: z
 *       .object({
 *         location: z
 *           .string()
 *           .describe("The city and state, e.g. San Francisco, CA"),
 *       })
 *       .describe("Get the current weather in a given location"),
 *     name: "GetWeather",
 *     description: "Get the current weather in a given location",
 *   }
 * );
 *
 * const getPopulationTool = tool(
 *   (input) => {
 *     // Do something with the input
 *     return JSON.stringify(input);
 *   },
 *   {
 *     schema: z
 *       .object({
 *         location: z
 *           .string()
 *           .describe("The city and state, e.g. San Francisco, CA"),
 *       })
 *       .describe("Get the current population in a given location"),
 *     name: "GetPopulation",
 *     description: "Get the current population in a given location",
 *   }
 * );
 *
 * const configurableModel = await initChatModel("gpt-4", {
 *   configurableFields: ["model", "modelProvider", "apiKey"],
 *   temperature: 0,
 * });
 *
 * const configurableModelWithTools = configurableModel.bind({
 *   tools: [getWeatherTool, getPopulationTool],
 * });
 *
 * const configurableToolResult = await configurableModelWithTools.invoke(
 *   "Which city is hotter today and which is bigger: LA or NY?",
 *   {
 *     configurable: {
 *       apiKey: process.env.OPENAI_API_KEY,
 *     },
 *   }
 * );
 *
 * const configurableToolResult2 = await configurableModelWithTools.invoke(
 *   "Which city is hotter today and which is bigger: LA or NY?",
 *   {
 *     configurable: {
 *       model: "claude-3-5-sonnet-20240620",
 *       apiKey: process.env.ANTHROPIC_API_KEY,
 *     },
 *   }
 * );
 * ```
 *
 * @description
 * This function initializes a ChatModel based on the provided model name and provider.
 * It supports various model providers and allows for runtime configuration of model parameters.
 *
 * Security Note: Setting `configurableFields` to "any" means fields like api_key, base_url, etc.
 * can be altered at runtime, potentially redirecting model requests to a different service/user.
 * Make sure that if you're accepting untrusted configurations, you enumerate the
 * `configurableFields` explicitly.
 *
 * The function will attempt to infer the model provider from the model name if not specified.
 * Certain model name prefixes are associated with specific providers:
 *   - gpt-3... or gpt-4... -> openai
 *   - claude... -> anthropic
 *   - amazon.... -> bedrock
 *   - gemini... -> google-vertexai
 *   - command... -> cohere
 *   - accounts/fireworks... -> fireworks
 *
 * @since 0.2.11
 * @version 0.2.11
 */
export async function initChatModel(model, 
// eslint-disable-next-line @typescript-eslint/no-explicit-any
fields) {
    const { configurableFields, configPrefix, modelProvider, ...params } = {
        configPrefix: "",
        ...(fields ?? {}),
    };
    let configurableFieldsCopy = configurableFields;
    if (!model && !configurableFieldsCopy) {
        configurableFieldsCopy = ["model", "modelProvider"];
    }
    if (configPrefix && !configurableFieldsCopy) {
        console.warn(`{ configPrefix: ${configPrefix} } has been set but no fields are configurable. Set ` +
            `{ configurableFields: [...] } to specify the model params that are ` +
            `configurable.`);
    }
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const paramsCopy = { ...params };
    if (!configurableFieldsCopy) {
        return new _ConfigurableModel({
            defaultConfig: {
                ...paramsCopy,
                model,
                modelProvider,
            },
            configPrefix,
        });
    }
    else {
        if (model) {
            paramsCopy.model = model;
        }
        if (modelProvider) {
            paramsCopy.modelProvider = modelProvider;
        }
        return new _ConfigurableModel({
            defaultConfig: paramsCopy,
            configPrefix,
            configurableFields: configurableFieldsCopy,
        });
    }
}