diff --git a/docs/core_docs/docs/integrations/llms/ibm.mdx b/docs/core_docs/docs/integrations/llms/ibm.mdx new file mode 100644 index 000000000000..5fe91a211a80 --- /dev/null +++ b/docs/core_docs/docs/integrations/llms/ibm.mdx @@ -0,0 +1,166 @@ +# @langchain/community/llms/ibm + +This is a LangChain.js community integration for IBM watsonx.ai, built on IBM's SDK. + +## Installation + +```bash npm2yarn +npm install @langchain/community @langchain/core +``` + +## LLMs + +This package contains the `WatsonxLLM` class, which is the recommended way to interface with the Watsonx series of models. + +To use it, install the requirements and configure your environment depending on what type of authentication you will be using. + +## IAM authentication + +```bash +export WATSONX_AI_AUTH_TYPE=iam +export WATSONX_AI_APIKEY= +``` + +## Bearer token authentication + +```bash +export WATSONX_AI_AUTH_TYPE=bearertoken +export WATSONX_AI_BEARER_TOKEN= +``` + +## CP4D authentication + +```bash +export WATSONX_AI_AUTH_TYPE=cp4d +export WATSONX_AI_USERNAME= +export WATSONX_AI_PASSWORD= +export WATSONX_AI_URL= +``` + +Once these are set in your environment variables and the object is initialized, authentication will proceed automatically. + +Authentication can also be accomplished by passing these values as parameters to a new instance. + +## IAM authentication + +```typescript +import { WatsonxLLM } from "@langchain/community/llms/ibm"; + +const props = { + version: "YYYY-MM-DD", + serviceUrl: "", + projectId: "", + watsonxAIAuthType: "iam", + watsonxAIApikey: "", +}; +const instance = new WatsonxLLM(props); +``` + +## Bearer token authentication + +```typescript +import { WatsonxLLM } from "@langchain/community/llms/ibm"; + +const props = { + version: "YYYY-MM-DD", + serviceUrl: "", + projectId: "", + watsonxAIAuthType: "bearertoken", + watsonxAIBearerToken: "", +}; +const instance = new WatsonxLLM(props); +``` + +## CP4D authentication + +```typescript +import { WatsonxLLM } from "@langchain/community/llms/ibm"; + +const props = { + version: "YYYY-MM-DD", + serviceUrl: "", + projectId: "", + watsonxAIAuthType: "cp4d", + watsonxAIUsername: "", + watsonxAIPassword: "", + watsonxAIUrl: "", +}; +const instance = new WatsonxLLM(props); +``` + +## Loading the model + +You might need to adjust model parameters for different models or tasks. For more details on the parameters, refer to IBM's documentation. + +```typescript +import { WatsonxLLM } from "@langchain/community/llms/ibm"; + +const props = { + decoding_method: "sample", + max_new_tokens: 100, + min_new_tokens: 1, + temperature: 0.5, + top_k: 50, + top_p: 1, +}; +const instance = new WatsonxLLM({ + version: "YYYY-MM-DD", + serviceUrl: process.env.API_URL, + // Pass exactly one of projectId, spaceId or idOrName: + projectId: "", + spaceId: "", + idOrName: "", + modelId: "", + ...props, +}); +``` + +Note: + +- You must provide exactly one of `spaceId`, `projectId` or `idOrName` (deployment id) in order to proceed; a deployment-scoped sketch follows below. +- Depending on the region of your provisioned service instance, use the correct `serviceUrl`. +- You need to specify the model you want to use for inferencing through `modelId`.
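+ +## Inferencing a deployment + +When the instance is scoped to a deployment via `idOrName`, generation runs against that deployed prompt template, and the invoked text is forwarded to it as its `input` prompt variable. A minimal, hypothetical sketch (it assumes authentication is already configured through the environment variables above and that a deployment whose prompt template exposes an `input` variable already exists; the deployment id is a placeholder): + +```typescript +import { WatsonxLLM } from "@langchain/community/llms/ibm"; + +const deployedInstance = new WatsonxLLM({ + version: "YYYY-MM-DD", + serviceUrl: process.env.API_URL, + // Hypothetical id of a deployed prompt template. + idOrName: "", +}); + +// The string below is sent to the deployment as its `input` prompt variable. +const deployedResult = await deployedInstance.invoke("Print hello world."); +console.log(deployedResult); +```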
+ +## Overriding props + +Props passed at initialization last for the whole lifecycle of the object; however, you may override them for a single method call by passing a second argument, as below: + +```typescript +const result = await instance.invoke("Print hello world.", { + modelId: "", + parameters: { + max_new_tokens: 20, + }, +}); +console.log(result); +``` + +## Text generation + +```typescript +const result = await instance.invoke("Print hello world."); +console.log(result); + +const results = await instance.generate([ + "Print hello world.", + "Print bye, bye world!", +]); +console.log(results); +``` + +## Streaming + +```typescript +const stream = await instance.stream("Print hello world."); +for await (const chunk of stream) { + console.log(chunk); +} +``` + +## Tokenization + +This package has its own `getNumTokens` implementation, which returns the exact number of tokens that would be used. + +```typescript +const tokens = await instance.getNumTokens("Print hello world."); +console.log(tokens); +```
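+ +## Listing models + +The class also exposes a `listModels` helper that queries the service for the foundation models available for text generation. A short sketch (it assumes `instance` is an authenticated instance from the sections above; the result may be `undefined` if the service returns no resources): + +```typescript +const models = await instance.listModels(); +console.log(models); // an array of model id strings, e.g. ["ibm/granite-13b-chat-v2", ...] +```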
diff --git a/docs/core_docs/docs/integrations/text_embedding/ibm.mdx b/docs/core_docs/docs/integrations/text_embedding/ibm.mdx new file mode 100644 index 000000000000..41f468406acb --- /dev/null +++ b/docs/core_docs/docs/integrations/text_embedding/ibm.mdx @@ -0,0 +1,130 @@ +# @langchain/community/embeddings/ibm + +This is a LangChain.js community integration for IBM watsonx.ai, built on IBM's SDK. + +## Installation + +```bash npm2yarn +npm install @langchain/community @langchain/core +``` + +## Embeddings + +This package contains the `WatsonxEmbeddings` class, which is the recommended way to interface with the Watsonx series of models. + +To use it, install the requirements and configure your environment depending on what type of authentication you will be using. + +## IAM authentication + +```bash +export WATSONX_AI_AUTH_TYPE=iam +export WATSONX_AI_APIKEY= +``` + +## Bearer token authentication + +```bash +export WATSONX_AI_AUTH_TYPE=bearertoken +export WATSONX_AI_BEARER_TOKEN= +``` + +## CP4D authentication + +```bash +export WATSONX_AI_AUTH_TYPE=cp4d +export WATSONX_AI_USERNAME= +export WATSONX_AI_PASSWORD= +export WATSONX_AI_URL= +``` + +Once these are set in your environment variables and the object is initialized, authentication will proceed automatically. + +Authentication can also be accomplished by passing these values as parameters to a new instance. + +## IAM authentication + +```typescript +import { WatsonxEmbeddings } from "@langchain/community/embeddings/ibm"; + +const props = { + version: "YYYY-MM-DD", + serviceUrl: "", + projectId: "", + watsonxAIAuthType: "iam", + watsonxAIApikey: "", +}; +const instance = new WatsonxEmbeddings(props); +``` + +## Bearer token authentication + +```typescript +import { WatsonxEmbeddings } from "@langchain/community/embeddings/ibm"; + +const props = { + version: "YYYY-MM-DD", + serviceUrl: "", + projectId: "", + watsonxAIAuthType: "bearertoken", + watsonxAIBearerToken: "", +}; +const instance = new WatsonxEmbeddings(props); +``` + +## CP4D authentication + +```typescript +import { WatsonxEmbeddings } from "@langchain/community/embeddings/ibm"; + +const props = { + version: "YYYY-MM-DD", + serviceUrl: "", + projectId: "", + watsonxAIAuthType: "cp4d", + watsonxAIUsername: "", + watsonxAIPassword: "", + watsonxAIUrl: "", +}; +const instance = new WatsonxEmbeddings(props); +``` + +## Loading the model + +```typescript +import { WatsonxEmbeddings } from "@langchain/community/embeddings/ibm"; + +const instance = new WatsonxEmbeddings({ + version: "YYYY-MM-DD", + serviceUrl: process.env.API_URL, + // Pass exactly one of projectId, spaceId or idOrName: + projectId: "", + spaceId: "", + idOrName: "", + modelId: "", +}); +``` + +Note: + +- You must provide exactly one of `spaceId`, `projectId` or `idOrName` (deployment id) in order to proceed. +- Depending on the region of your provisioned service instance, use the correct `serviceUrl`. +- You need to specify the model you want to use for inferencing through `modelId`. + +## Embeddings + +This package supports embedding models; you can proceed with the following code snippet. + +```typescript +import { WatsonxEmbeddings } from "@langchain/community/embeddings/ibm"; + +const instance = new WatsonxEmbeddings({ + version: "YYYY-MM-DD", + serviceUrl: process.env.API_URL, + // Pass exactly one of projectId, spaceId or idOrName: + projectId: "", + spaceId: "", + idOrName: "", + modelId: "", +}); + +const result = await instance.embedQuery("Hello world!"); +console.log(result); +``` diff --git a/examples/src/embeddings/ibm.ts b/examples/src/embeddings/ibm.ts new file mode 100644 index 000000000000..d6533c1f967b --- /dev/null +++ b/examples/src/embeddings/ibm.ts @@ -0,0 +1,10 @@ +import { WatsonxEmbeddings } from "@langchain/community/embeddings/ibm"; + +const instance = new WatsonxEmbeddings({ + version: "YYYY-MM-DD", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID, +}); + +const result = await instance.embedQuery("Hello world!"); +console.log(result); diff --git a/examples/src/llms/ibm.ts b/examples/src/llms/ibm.ts new file mode 100644 index 000000000000..5d5ac1fd208f --- /dev/null +++ b/examples/src/llms/ibm.ts @@ -0,0 +1,30 @@ +import { WatsonxLLM } from "@langchain/community/llms/ibm"; + +const props = { + decoding_method: "sample", + max_new_tokens: 100, + min_new_tokens: 1, + temperature: 0.5, + top_k: 50, + top_p: 1, +}; +const instance = new WatsonxLLM({ + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID, + ...props, +}); + +const result = await instance.invoke("Print hello world."); +console.log(result); + +const results = await instance.generate([ + "Print hello world.", + "Print bye, bye world!", +]); +console.log(results); + +const stream = await instance.stream("Print hello world."); +for await (const chunk of stream) { + console.log(chunk); +} diff --git a/libs/langchain-community/.gitignore b/libs/langchain-community/.gitignore index
0168b6a3643a..a166dc5d907a 100644 --- a/libs/langchain-community/.gitignore +++ b/libs/langchain-community/.gitignore @@ -162,6 +162,10 @@ embeddings/hf_transformers.cjs embeddings/hf_transformers.js embeddings/hf_transformers.d.ts embeddings/hf_transformers.d.cts +embeddings/ibm.cjs +embeddings/ibm.js +embeddings/ibm.d.ts +embeddings/ibm.d.cts embeddings/jina.cjs embeddings/jina.js embeddings/jina.d.ts @@ -254,6 +258,10 @@ llms/hf.cjs llms/hf.js llms/hf.d.ts llms/hf.d.cts +llms/ibm.cjs +llms/ibm.js +llms/ibm.d.ts +llms/ibm.d.cts llms/llama_cpp.cjs llms/llama_cpp.js llms/llama_cpp.d.ts diff --git a/libs/langchain-community/langchain.config.js b/libs/langchain-community/langchain.config.js index 61cdcbc7ab65..92861de9bfa2 100644 --- a/libs/langchain-community/langchain.config.js +++ b/libs/langchain-community/langchain.config.js @@ -75,6 +75,7 @@ export const config = { "embeddings/gradient_ai": "embeddings/gradient_ai", "embeddings/hf": "embeddings/hf", "embeddings/hf_transformers": "embeddings/hf_transformers", + "embeddings/ibm": "embeddings/ibm", "embeddings/jina": "embeddings/jina", "embeddings/llama_cpp": "embeddings/llama_cpp", "embeddings/minimax": "embeddings/minimax", @@ -99,6 +100,7 @@ export const config = { "llms/friendli": "llms/friendli", "llms/gradient_ai": "llms/gradient_ai", "llms/hf": "llms/hf", + "llms/ibm": "llms/ibm", "llms/llama_cpp": "llms/llama_cpp", "llms/ollama": "llms/ollama", "llms/portkey": "llms/portkey", @@ -184,7 +186,8 @@ export const config = { // callbacks "callbacks/handlers/llmonitor": "callbacks/handlers/llmonitor", "callbacks/handlers/lunary": "callbacks/handlers/lunary", - "callbacks/handlers/upstash_ratelimit": "callbacks/handlers/upstash_ratelimit", + "callbacks/handlers/upstash_ratelimit": + "callbacks/handlers/upstash_ratelimit", // retrievers "retrievers/amazon_kendra": "retrievers/amazon_kendra", "retrievers/amazon_knowledge_base": "retrievers/amazon_knowledge_base", @@ -305,18 +308,20 @@ export const config = { "utils/event_source_parse": "utils/event_source_parse", "utils/cassandra": "utils/cassandra", // experimental - "experimental/callbacks/handlers/datadog": "experimental/callbacks/handlers/datadog", + "experimental/callbacks/handlers/datadog": + "experimental/callbacks/handlers/datadog", "experimental/graph_transformers/llm": "experimental/graph_transformers/llm", "experimental/multimodal_embeddings/googlevertexai": "experimental/multimodal_embeddings/googlevertexai", "experimental/hubs/makersuite/googlemakersuitehub": "experimental/hubs/makersuite/googlemakersuitehub", - "experimental/chat_models/ollama_functions": "experimental/chat_models/ollama_functions", + "experimental/chat_models/ollama_functions": + "experimental/chat_models/ollama_functions", "experimental/llms/chrome_ai": "experimental/llms/chrome_ai", "experimental/tools/pyinterpreter": "experimental/tools/pyinterpreter", // chains - "chains/graph_qa/cypher": "chains/graph_qa/cypher" + "chains/graph_qa/cypher": "chains/graph_qa/cypher", }, requiresOptionalDependency: [ "tools/aws_sfn", @@ -335,6 +340,7 @@ export const config = { "embeddings/tensorflow", "embeddings/hf", "embeddings/hf_transformers", + "embeddings/ibm", "embeddings/jina", "embeddings/llama_cpp", "embeddings/gradient_ai", @@ -347,6 +353,7 @@ export const config = { "llms/cohere", "llms/gradient_ai", "llms/hf", + "llms/ibm", "llms/raycast", "llms/replicate", "llms/sagemaker_endpoint", @@ -511,7 +518,7 @@ export const config = { // chains "chains/graph_qa/cypher", // langgraph checkpointers - 
"langgraph/checkpointers/vercel_kv" + "langgraph/checkpointers/vercel_kv", ], packageSuffix: "community", tsConfigPath: resolve("./tsconfig.json"), diff --git a/libs/langchain-community/package.json b/libs/langchain-community/package.json index 652677c8f19a..d9d87e2c7959 100644 --- a/libs/langchain-community/package.json +++ b/libs/langchain-community/package.json @@ -35,10 +35,12 @@ "author": "LangChain", "license": "MIT", "dependencies": { + "@ibm-cloud/watsonx-ai": "1.0.1", "@langchain/openai": ">=0.2.0 <0.4.0", "binary-extensions": "^2.2.0", "expr-eval": "^2.0.2", "flat": "^5.0.2", + "ibm-cloud-sdk-core": "^5.0.2", "js-yaml": "^4.1.0", "langchain": ">=0.2.3 <0.4.0", "langsmith": "~0.1.56", @@ -1068,6 +1070,15 @@ "import": "./embeddings/hf_transformers.js", "require": "./embeddings/hf_transformers.cjs" }, + "./embeddings/ibm": { + "types": { + "import": "./embeddings/ibm.d.ts", + "require": "./embeddings/ibm.d.cts", + "default": "./embeddings/ibm.d.ts" + }, + "import": "./embeddings/ibm.js", + "require": "./embeddings/ibm.cjs" + }, "./embeddings/jina": { "types": { "import": "./embeddings/jina.d.ts", @@ -1275,6 +1286,15 @@ "import": "./llms/hf.js", "require": "./llms/hf.cjs" }, + "./llms/ibm": { + "types": { + "import": "./llms/ibm.d.ts", + "require": "./llms/ibm.d.cts", + "default": "./llms/ibm.d.ts" + }, + "import": "./llms/ibm.js", + "require": "./llms/ibm.cjs" + }, "./llms/llama_cpp": { "types": { "import": "./llms/llama_cpp.d.ts", @@ -3180,6 +3200,10 @@ "embeddings/hf_transformers.js", "embeddings/hf_transformers.d.ts", "embeddings/hf_transformers.d.cts", + "embeddings/ibm.cjs", + "embeddings/ibm.js", + "embeddings/ibm.d.ts", + "embeddings/ibm.d.cts", "embeddings/jina.cjs", "embeddings/jina.js", "embeddings/jina.d.ts", @@ -3272,6 +3296,10 @@ "llms/hf.js", "llms/hf.d.ts", "llms/hf.d.cts", + "llms/ibm.cjs", + "llms/ibm.js", + "llms/ibm.d.ts", + "llms/ibm.d.cts", "llms/llama_cpp.cjs", "llms/llama_cpp.js", "llms/llama_cpp.d.ts", diff --git a/libs/langchain-community/src/embeddings/ibm.ts b/libs/langchain-community/src/embeddings/ibm.ts new file mode 100644 index 000000000000..26bca3cbadc3 --- /dev/null +++ b/libs/langchain-community/src/embeddings/ibm.ts @@ -0,0 +1,131 @@ +import { Embeddings } from "@langchain/core/embeddings"; +import { + EmbeddingParameters, + TextEmbeddingsParams, +} from "@ibm-cloud/watsonx-ai/dist/watsonx-ai-ml/vml_v1.js"; +import { WatsonXAI } from "@ibm-cloud/watsonx-ai"; +import { AsyncCaller } from "@langchain/core/utils/async_caller"; +import { WatsonxAuth, WatsonxParams } from "../types/watsonx_ai.js"; +import { authenticateAndSetInstance } from "../utils/ibm.js"; + +export interface WatsonxEmbeddingsParams + extends Omit, + Pick {} + +export class WatsonxEmbeddings + extends Embeddings + implements WatsonxEmbeddingsParams, WatsonxParams +{ + modelId = "ibm/slate-125m-english-rtrvr"; + + serviceUrl: string; + + version: string; + + spaceId?: string; + + projectId?: string; + + truncate_input_tokens?: number; + + maxRetries?: number; + + maxConcurrency?: number; + + private service: WatsonXAI; + + constructor(fields: WatsonxEmbeddingsParams & WatsonxAuth & WatsonxParams) { + const superProps = { maxConcurrency: 2, ...fields }; + super(superProps); + this.modelId = fields?.modelId ? 
fields.modelId : this.modelId; + this.version = fields.version; + this.serviceUrl = fields.serviceUrl; + this.truncate_input_tokens = fields.truncate_input_tokens; + this.maxConcurrency = fields.maxConcurrency; + this.maxRetries = fields.maxRetries; + if (fields.projectId && fields.spaceId) + throw new Error("Maximum 1 id type can be specified per instance"); + else if (!fields.projectId && !fields.spaceId && !fields.idOrName) + throw new Error( + "No id specified! At least one id type has to be specified" + ); + this.projectId = fields?.projectId; + this.spaceId = fields?.spaceId; + this.serviceUrl = fields?.serviceUrl; + const { + watsonxAIApikey, + watsonxAIAuthType, + watsonxAIBearerToken, + watsonxAIUsername, + watsonxAIPassword, + watsonxAIUrl, + version, + serviceUrl, + } = fields; + const auth = authenticateAndSetInstance({ + watsonxAIApikey, + watsonxAIAuthType, + watsonxAIBearerToken, + watsonxAIUsername, + watsonxAIPassword, + watsonxAIUrl, + version, + serviceUrl, + }); + if (auth) this.service = auth; + else throw new Error("You have not provided one type of authentication"); + } + + scopeId() { + if (this.projectId) return { projectId: this.projectId }; + else return { spaceId: this.spaceId }; + } + + invocationParams(): EmbeddingParameters { + return { + truncate_input_tokens: this.truncate_input_tokens, + }; + } + + async completionWithRetry<T>(callback: () => T) { + const caller = new AsyncCaller({ + maxConcurrency: this.maxConcurrency, + maxRetries: this.maxRetries, + }); + return caller.call(async () => callback()); + } + + async listModels() { + const listModelParams = { + filters: "function_embedding", + }; + const listModels = await this.completionWithRetry(() => + this.service.listFoundationModelSpecs(listModelParams) + ); + return listModels.result.resources?.map((item) => item.model_id); + } + + private async embedSingleText(inputs: string[]) { + const textEmbeddingParams: TextEmbeddingsParams = { + inputs, + modelId: this.modelId, + ...this.scopeId(), + parameters: this.invocationParams(), + }; + + const embeddings = await this.completionWithRetry(() => + this.service.embedText(textEmbeddingParams) + ); + return embeddings.result.results.map((item) => item.embedding); + } + + async embedDocuments(documents: string[]): Promise<number[][]> { + const data = await this.embedSingleText(documents); + return data; + } + + async embedQuery(document: string): Promise<number[]> { + const data = await this.embedSingleText([document]); + return data[0]; + } +} diff --git a/libs/langchain-community/src/embeddings/tests/ibm.int.test.ts b/libs/langchain-community/src/embeddings/tests/ibm.int.test.ts new file mode 100644 index 000000000000..9361a7915213 --- /dev/null +++ b/libs/langchain-community/src/embeddings/tests/ibm.int.test.ts @@ -0,0 +1,62 @@ +/* eslint-disable no-process-env */ +import { test } from "@jest/globals"; +import { WatsonxEmbeddings } from "../ibm.js"; + +describe("Test embeddings", () => { + test("embedQuery method", async () => { + const embeddings = new WatsonxEmbeddings({ + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID, + }); + const res = await embeddings.embedQuery("Hello world"); + expect(typeof res[0]).toBe("number"); + }); + + test("embedDocuments", async () => { + const embeddings = new WatsonxEmbeddings({ + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID, + }); + const res = await
embeddings.embedDocuments(["Hello world", "Bye world"]); + expect(res).toHaveLength(2); + expect(typeof res[0][0]).toBe("number"); + expect(typeof res[1][0]).toBe("number"); + }); + + test("Concurrency", async () => { + const embeddings = new WatsonxEmbeddings({ + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID, + maxConcurrency: 4, + }); + const res = await embeddings.embedDocuments([ + "Hello world", + "Bye world", + "Hello world", + "Bye world", + "Hello world", + "Bye world", + "Hello world", + "Bye world", + ]); + expect(res).toHaveLength(8); + expect(res.find((embedding) => typeof embedding[0] !== "number")).toBe( + undefined + ); + }); + + test("List models", async () => { + const embeddings = new WatsonxEmbeddings({ + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID, + maxConcurrency: 4, + }); + const res = await embeddings.listModels(); + expect(res?.length).toBeGreaterThan(0); + if (res) expect(typeof res[0]).toBe("string"); + }); +}); diff --git a/libs/langchain-community/src/embeddings/tests/ibm.test.ts b/libs/langchain-community/src/embeddings/tests/ibm.test.ts new file mode 100644 index 000000000000..60868a24e70b --- /dev/null +++ b/libs/langchain-community/src/embeddings/tests/ibm.test.ts @@ -0,0 +1,122 @@ +/* eslint-disable no-process-env */ +import { testProperties } from "../../llms/tests/ibm.test.js"; +import { WatsonxEmbeddings } from "../ibm.js"; + +const fakeAuthProp = { + watsonxAIAuthType: "iam", + watsonxAIApikey: "fake_key", +}; +describe("Embeddings unit tests", () => { + describe("Positive tests", () => { + test("Basic properties", () => { + const testProps = { + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID || "testString", + }; + const instance = new WatsonxEmbeddings({ ...testProps, ...fakeAuthProp }); + testProperties(instance, testProps); + }); + + test("Basic properties", () => { + const testProps = { + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID || "testString", + truncate_input_tokens: 10, + maxConcurrency: 2, + maxRetries: 2, + modelId: "ibm/slate-125m-english-rtrvr", + }; + const instance = new WatsonxEmbeddings({ ...testProps, ...fakeAuthProp }); + + testProperties(instance, testProps); + }); + }); + + describe("Negative tests", () => { + test("Missing id", async () => { + const testProps = { + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + }; + expect( + () => + new WatsonxEmbeddings({ + ...testProps, + ...fakeAuthProp, + }) + ).toThrowError(); + }); + + test("Missing other props", async () => { + // @ts-expect-error Intentionally passing wrong value + const testPropsProjectId: WatsonxInputLLM = { + projectId: process.env.WATSONX_AI_PROJECT_ID || "testString", + }; + expect( + () => + new WatsonxEmbeddings({ + ...testPropsProjectId, + }) + ).toThrowError(); + // @ts-expect-error //Intentionally passing wrong value + const testPropsServiceUrl: WatsonxInputLLM = { + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + }; + expect( + () => + new WatsonxEmbeddings({ + ...testPropsServiceUrl, + }) + ).toThrowError(); + const testPropsVersion = { + version: "2024-05-31", + }; + expect( + () => + new WatsonxEmbeddings({ + // @ts-expect-error Intentionally passing wrong props + 
testPropsVersion, + }) + ).toThrowError(); + }); + + test("Passing more than one id", async () => { + const testProps = { + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID || "testString", + spaceId: process.env.WATSONX_AI_PROJECT_ID || "testString", + }; + expect( + () => + new WatsonxEmbeddings({ + ...testProps, + ...fakeAuthProp, + }) + ).toThrowError(); + }); + + test("Invalid properties", () => { + const testProps = { + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID || "testString", + }; + const notExTestProps = { + notExisting: 12, + notExObj: { + notExProp: 12, + }, + }; + const instance = new WatsonxEmbeddings({ + ...testProps, + ...notExTestProps, + ...fakeAuthProp, + }); + + testProperties(instance, testProps, notExTestProps); + }); + }); +}); diff --git a/libs/langchain-community/src/llms/ibm.ts b/libs/langchain-community/src/llms/ibm.ts new file mode 100644 index 000000000000..b386a1614ae8 --- /dev/null +++ b/libs/langchain-community/src/llms/ibm.ts @@ -0,0 +1,567 @@ +/* eslint-disable @typescript-eslint/no-unused-vars */ +import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager"; +import { BaseLLM, BaseLLMParams } from "@langchain/core/language_models/llms"; +import { WatsonXAI } from "@ibm-cloud/watsonx-ai"; +import { + DeploymentsTextGenerationParams, + DeploymentsTextGenerationStreamParams, + DeploymentTextGenProperties, + ReturnOptionProperties, + TextGenerationParams, + TextGenerationStreamParams, + TextGenLengthPenalty, + TextGenParameters, + TextTokenizationParams, + TextTokenizeParameters, +} from "@ibm-cloud/watsonx-ai/dist/watsonx-ai-ml/vml_v1.js"; +import { + Generation, + LLMResult, + GenerationChunk, +} from "@langchain/core/outputs"; +import { BaseLanguageModelCallOptions } from "@langchain/core/language_models/base"; +import { AsyncCaller } from "@langchain/core/utils/async_caller"; +import { authenticateAndSetInstance } from "../utils/ibm.js"; +import { + GenerationInfo, + ResponseChunk, + TokenUsage, + WatsonxAuth, + WatsonxParams, +} from "../types/watsonx_ai.js"; + +/** + * Input to LLM class. + */ + +export interface WatsonxCallOptionsLLM + extends BaseLanguageModelCallOptions, + Omit< + Partial< + TextGenerationParams & + TextGenerationStreamParams & + DeploymentsTextGenerationParams & + DeploymentsTextGenerationStreamParams + >, + "input" + > { + maxRetries?: number; +} + +export interface WatsonxInputLLM + extends TextGenParameters, + WatsonxParams, + BaseLLMParams { + streaming?: boolean; +} + +/** + * Integration with an LLM. 
*/ +export class WatsonxLLM< + CallOptions extends WatsonxCallOptionsLLM = WatsonxCallOptionsLLM + > + extends BaseLLM<CallOptions> + implements WatsonxInputLLM +{ + // Used for tracing + static lc_name() { + return "Watsonx"; + } + + lc_serializable = true; + + streaming = false; + + modelId = "ibm/granite-13b-chat-v2"; + + maxRetries = 0; + + version = "2024-05-31"; + + serviceUrl: string; + + max_new_tokens?: number; + + spaceId?: string; + + projectId?: string; + + idOrName?: string; + + decoding_method?: TextGenParameters.Constants.DecodingMethod | string; + + length_penalty?: TextGenLengthPenalty; + + min_new_tokens?: number; + + random_seed?: number; + + stop_sequences?: string[]; + + temperature?: number; + + time_limit?: number; + + top_k?: number; + + top_p?: number; + + repetition_penalty?: number; + + truncate_input_tokens?: number; + + return_options?: ReturnOptionProperties; + + include_stop_sequence?: boolean; + + maxConcurrency?: number; + + private service: WatsonXAI; + + constructor(fields: WatsonxInputLLM & WatsonxAuth) { + super(fields); + this.modelId = fields.modelId ?? this.modelId; + this.version = fields.version; + this.max_new_tokens = fields.max_new_tokens ?? this.max_new_tokens; + this.serviceUrl = fields.serviceUrl; + this.decoding_method = fields.decoding_method; + this.length_penalty = fields.length_penalty; + this.min_new_tokens = fields.min_new_tokens; + this.random_seed = fields.random_seed; + this.stop_sequences = fields.stop_sequences; + this.temperature = fields.temperature; + this.time_limit = fields.time_limit; + this.top_k = fields.top_k; + this.top_p = fields.top_p; + this.repetition_penalty = fields.repetition_penalty; + this.truncate_input_tokens = fields.truncate_input_tokens; + this.return_options = fields.return_options; + this.include_stop_sequence = fields.include_stop_sequence; + this.maxRetries = fields.maxRetries || this.maxRetries; + this.maxConcurrency = fields.maxConcurrency; + this.streaming = fields.streaming || this.streaming; + if ( + (fields.projectId && fields.spaceId) || + (fields.idOrName && fields.projectId) || + (fields.spaceId && fields.idOrName) + ) + throw new Error("Maximum 1 id type can be specified per instance"); + + if (!fields.projectId && !fields.spaceId && !fields.idOrName) + throw new Error( + "No id specified!
At least one id type has to be specified" + ); + this.projectId = fields?.projectId; + this.spaceId = fields?.spaceId; + this.idOrName = fields?.idOrName; + + this.serviceUrl = fields?.serviceUrl; + const { + watsonxAIApikey, + watsonxAIAuthType, + watsonxAIBearerToken, + watsonxAIUsername, + watsonxAIPassword, + watsonxAIUrl, + version, + serviceUrl, + } = fields; + + const auth = authenticateAndSetInstance({ + watsonxAIApikey, + watsonxAIAuthType, + watsonxAIBearerToken, + watsonxAIUsername, + watsonxAIPassword, + watsonxAIUrl, + version, + serviceUrl, + }); + if (auth) this.service = auth; + else throw new Error("You have not provided one type of authentication"); + } + + get lc_secrets(): { [key: string]: string } { + return { + authenticator: "AUTHENTICATOR", + apiKey: "WATSONX_AI_APIKEY", + apikey: "WATSONX_AI_APIKEY", + watsonxAIAuthType: "WATSONX_AI_AUTH_TYPE", + watsonxAIApikey: "WATSONX_AI_APIKEY", + watsonxAIBearerToken: "WATSONX_AI_BEARER_TOKEN", + watsonxAIUsername: "WATSONX_AI_USERNAME", + watsonxAIPassword: "WATSONX_AI_PASSWORD", + watsonxAIUrl: "WATSONX_AI_URL", + }; + } + + get lc_aliases(): { [key: string]: string } { + return { + authenticator: "authenticator", + apikey: "watsonx_ai_apikey", + apiKey: "watsonx_ai_apikey", + watsonxAIAuthType: "watsonx_ai_auth_type", + watsonxAIApikey: "watsonx_ai_apikey", + watsonxAIBearerToken: "watsonx_ai_bearer_token", + watsonxAIUsername: "watsonx_ai_username", + watsonxAIPassword: "watsonx_ai_password", + watsonxAIUrl: "watsonx_ai_url", + }; + } + + invocationParams( + options: this["ParsedCallOptions"] + ): TextGenParameters | DeploymentTextGenProperties { + const { parameters } = options; + + return { + max_new_tokens: parameters?.max_new_tokens ?? this.max_new_tokens, + decoding_method: parameters?.decoding_method ?? this.decoding_method, + length_penalty: parameters?.length_penalty ?? this.length_penalty, + min_new_tokens: parameters?.min_new_tokens ?? this.min_new_tokens, + random_seed: parameters?.random_seed ?? this.random_seed, + stop_sequences: options?.stop ?? this.stop_sequences, + temperature: parameters?.temperature ?? this.temperature, + time_limit: parameters?.time_limit ?? this.time_limit, + top_k: parameters?.top_k ?? this.top_k, + top_p: parameters?.top_p ?? this.top_p, + repetition_penalty: + parameters?.repetition_penalty ?? this.repetition_penalty, + truncate_input_tokens: + parameters?.truncate_input_tokens ?? this.truncate_input_tokens, + return_options: parameters?.return_options ?? this.return_options, + include_stop_sequence: + parameters?.include_stop_sequence ??
this.include_stop_sequence, + }; + } + + scopeId() { + if (this.projectId) + return { projectId: this.projectId, modelId: this.modelId }; + else if (this.spaceId) + return { spaceId: this.spaceId, modelId: this.modelId }; + else if (this.idOrName) + return { idOrName: this.idOrName, modelId: this.modelId }; + else return { spaceId: this.spaceId, modelId: this.modelId }; + } + + async listModels() { + const listModelParams = { + filters: "function_text_generation", + }; + const listModels = await this.completionWithRetry(() => + this.service.listFoundationModelSpecs(listModelParams) + ); + return listModels.result.resources?.map((item) => item.model_id); + } + + private async generateSingleMessage( + input: string, + options: this["ParsedCallOptions"], + stream: true + ): Promise<AsyncIterable<string>>; + + private async generateSingleMessage( + input: string, + options: this["ParsedCallOptions"], + stream: false + ): Promise<Generation[]>; + + private async generateSingleMessage( + input: string, + options: this["ParsedCallOptions"], + stream: boolean + ) { + const { + signal, + stop, + maxRetries, + maxConcurrency, + timeout, + ...requestOptions + } = options; + const tokenUsage = { generated_token_count: 0, input_token_count: 0 }; + const idOrName = options?.idOrName ?? this.idOrName; + const parameters = this.invocationParams(options); + if (stream) { + const textStream = idOrName + ? await this.service.deploymentGenerateTextStream({ + idOrName, + ...requestOptions, + parameters: { + ...parameters, + prompt_variables: { + input, + }, + }, + }) + : await this.service.generateTextStream({ + input, + parameters, + ...this.scopeId(), + ...requestOptions, + }); + return textStream as unknown as AsyncIterable<string>; + } else { + const textGenerationPromise = idOrName + ? this.service.deploymentGenerateText({ + ...requestOptions, + idOrName, + parameters: { + ...parameters, + prompt_variables: { + input, + }, + }, + }) + : this.service.generateText({ + input, + parameters, + ...this.scopeId(), + ...requestOptions, + }); + + const textGeneration = await textGenerationPromise; + const singleGeneration: Generation[] = textGeneration.result.results.map( + (result) => { + tokenUsage.generated_token_count += result.generated_token_count + ? result.generated_token_count + : 0; + tokenUsage.input_token_count += result.input_token_count + ? result.input_token_count + : 0; + return { + text: result.generated_text, + generationInfo: { + stop_reason: result.stop_reason, + input_token_count: result.input_token_count, + generated_token_count: result.generated_token_count, + }, + }; + } + ); + return singleGeneration; + } + } + + async completionWithRetry<T>( + callback: () => T, + options?: this["ParsedCallOptions"] + ) { + const caller = new AsyncCaller({ + maxConcurrency: options?.maxConcurrency || this.maxConcurrency, + maxRetries: this.maxRetries, + }); + const result = options + ?
caller.callWithOptions( + { + signal: options.signal, + }, + async () => callback() + ) + : caller.call(async () => callback()); + + return result; + } + + async _generate( + prompts: string[], + options: this["ParsedCallOptions"], + _runManager?: CallbackManagerForLLMRun + ): Promise<LLMResult> { + const tokenUsage: TokenUsage = { + generated_token_count: 0, + input_token_count: 0, + }; + if (this.streaming) { + const generations: Generation[][] = await Promise.all( + prompts.map(async (prompt, promptIdx) => { + if (options.signal?.aborted) { + throw new Error("AbortError"); + } + const callback = () => + this.generateSingleMessage(prompt, options, true); + + type ReturnMessage = ReturnType<typeof callback>; + const stream = await this.completionWithRetry<ReturnMessage>( + callback, + options + ); + + const responseChunk: ResponseChunk = { + id: 0, + event: "", + data: { + results: [], + }, + }; + const messages: ResponseChunk[] = []; + type ResponseChunkKeys = keyof ResponseChunk; + // Chunks arrive as SSE-style lines ("id: ...", "event: ...", "data: ..."); + // an empty line marks the end of one event. + for await (const chunk of stream) { + if (chunk.length > 0) { + const index = chunk.indexOf(": "); + const [key, value] = [ + chunk.substring(0, index) as ResponseChunkKeys, + chunk.substring(index + 2), + ]; + if (key === "id") { + responseChunk[key] = Number(value); + } else if (key === "event") { + responseChunk[key] = String(value); + } else { + responseChunk[key] = JSON.parse(value); + } + } else if (chunk.length === 0) { + messages.push(JSON.parse(JSON.stringify(responseChunk))); + Object.assign(responseChunk, { id: 0, event: "", data: {} }); + } + } + + const generationsArray: GenerationInfo[] = []; + for (const message of messages) { + message.data.results.forEach((item, index) => { + const generationInfo: GenerationInfo = { + text: "", + stop_reason: "", + generated_token_count: 0, + input_token_count: 0, + }; + if (item.generated_text !== "") + void _runManager?.handleLLMNewToken(item.generated_text ??
"", { + prompt: promptIdx, + completion: 1, + }); + geneartionsArray[index] ??= generationInfo; + geneartionsArray[index].generated_token_count = + item.generated_token_count; + geneartionsArray[index].input_token_count += + item.input_token_count; + geneartionsArray[index].stop_reason = item.stop_reason; + geneartionsArray[index].text += item.generated_text; + }); + } + return geneartionsArray.map((item) => { + const { text, ...rest } = item; + tokenUsage.generated_token_count += rest.generated_token_count; + tokenUsage.input_token_count += rest.input_token_count; + return { + text, + generationInfo: rest, + }; + }); + }) + ); + const result: LLMResult = { generations, llmOutput: { tokenUsage } }; + return result; + } else { + const generations: Generation[][] = await Promise.all( + prompts.map(async (prompt) => { + if (options.signal?.aborted) { + throw new Error("AbortError"); + } + + const callback = () => + this.generateSingleMessage(prompt, options, false); + type ReturnMessage = ReturnType; + + const response = await this.completionWithRetry( + callback, + options + ); + const [generated_token_count, input_token_count] = response.reduce( + (acc, curr) => { + let generated = 0; + let inputed = 0; + if (curr?.generationInfo?.generated_token_count) + generated = curr.generationInfo.generated_token_count + acc[0]; + if (curr?.generationInfo?.input_token_count) + inputed = curr.generationInfo.input_token_count + acc[1]; + return [generated, inputed]; + }, + [0, 0] + ); + tokenUsage.generated_token_count += generated_token_count; + tokenUsage.input_token_count += input_token_count; + return response; + }) + ); + + const result: LLMResult = { generations, llmOutput: { tokenUsage } }; + return result; + } + } + + async getNumTokens( + content: string, + options?: TextTokenizeParameters + ): Promise { + const params: TextTokenizationParams = { + ...this.scopeId(), + input: content, + parameters: options, + }; + const callback = () => this.service.tokenizeText(params); + type ReturnTokens = ReturnType; + + const response = await this.completionWithRetry(callback); + return response.result.result.token_count; + } + + async *_streamResponseChunks( + prompt: string, + options: this["ParsedCallOptions"], + runManager?: CallbackManagerForLLMRun + ): AsyncGenerator { + const callback = () => this.generateSingleMessage(prompt, options, true); + type ReturnStream = ReturnType; + const streamInferDeployedPrompt = + await this.completionWithRetry(callback); + const responseChunk: ResponseChunk = { + id: 0, + event: "", + data: { + results: [], + }, + }; + for await (const chunk of streamInferDeployedPrompt) { + if (options.signal?.aborted) { + throw new Error("AbortError"); + } + + type Keys = keyof typeof responseChunk; + if (chunk.length > 0) { + const index = chunk.indexOf(": "); + const [key, value] = [ + chunk.substring(0, index) as Keys, + chunk.substring(index + 2), + ]; + if (key === "id") { + responseChunk[key] = Number(value); + } else if (key === "event") { + responseChunk[key] = String(value); + } else { + responseChunk[key] = JSON.parse(value); + } + } else if ( + chunk.length === 0 && + responseChunk.data?.results?.length > 0 + ) { + for (const item of responseChunk.data.results) { + yield new GenerationChunk({ + text: item.generated_text, + generationInfo: { + stop_reason: item.stop_reason, + }, + }); + await runManager?.handleLLMNewToken(item.generated_text ?? 
""); + } + Object.assign(responseChunk, { id: 0, event: "", data: {} }); + } + } + } + + _llmType() { + return "watsonx"; + } +} diff --git a/libs/langchain-community/src/llms/tests/ibm.int.test.ts b/libs/langchain-community/src/llms/tests/ibm.int.test.ts new file mode 100644 index 000000000000..c8af7cee9c13 --- /dev/null +++ b/libs/langchain-community/src/llms/tests/ibm.int.test.ts @@ -0,0 +1,391 @@ +/* eslint-disable no-process-env */ +import { CallbackManager } from "@langchain/core/callbacks/manager"; +import { LLMResult } from "@langchain/core/outputs"; +import { StringPromptValue } from "@langchain/core/prompt_values"; +import { TokenUsage } from "../../types/watsonx_ai.js"; +import { WatsonxLLM, WatsonxInputLLM } from "../ibm.js"; + +const originalBackground = process.env.LANGCHAIN_CALLBACKS_BACKGROUND; + +describe("Text generation", () => { + describe("Test invoke method", () => { + test("Correct value", async () => { + const watsonXInstance = new WatsonxLLM({ + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID, + }); + await watsonXInstance.invoke("Hello world?"); + }); + + test("Invalid projectId", async () => { + const watsonXInstance = new WatsonxLLM({ + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: "Test wrong value", + }); + await expect(watsonXInstance.invoke("Hello world?")).rejects.toThrow(); + }); + + test("Invalid credentials", async () => { + const watsonXInstance = new WatsonxLLM({ + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: "Test wrong value", + watsonxAIAuthType: "iam", + watsonxAIApikey: "WrongApiKey", + watsonxAIUrl: "https://wrong.wrong/", + }); + await expect(watsonXInstance.invoke("Hello world?")).rejects.toThrow(); + }); + + test("Wrong value", async () => { + const watsonXInstance = new WatsonxLLM({ + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID, + }); + // @ts-expect-error Intentionally passing wrong value + await watsonXInstance.invoke({}); + }); + + test("Stop", async () => { + const watsonXInstance = new WatsonxLLM({ + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID, + }); + await watsonXInstance.invoke("Hello, how are you?", { + stop: ["Hello"], + }); + }, 5000); + + test("Stop with timeout", async () => { + const watsonXInstance = new WatsonxLLM({ + version: "2024-05-31", + serviceUrl: "sdadasdas" as string, + projectId: process.env.WATSONX_AI_PROJECT_ID, + max_new_tokens: 5, + maxRetries: 3, + }); + + await expect(() => + watsonXInstance.invoke("Print hello world", { timeout: 10 }) + ).rejects.toThrowError("AbortError"); + }, 5000); + + test("Signal in call options", async () => { + const watsonXInstance = new WatsonxLLM({ + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID, + max_new_tokens: 5, + maxRetries: 3, + }); + const controllerNoAbortion = new AbortController(); + await expect( + watsonXInstance.invoke("Print hello world", { + signal: controllerNoAbortion.signal, + }) + ).resolves.toBeDefined(); + const controllerToAbort = new AbortController(); + await expect(async () => { + const ret = watsonXInstance.invoke("Print hello world", { + signal: controllerToAbort.signal, + }); + controllerToAbort.abort(); + return ret; + 
}).rejects.toThrowError("AbortError"); + }, 5000); + + test("Concurenccy", async () => { + const model = new WatsonxLLM({ + maxConcurrency: 1, + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID, + }); + await Promise.all([ + model.invoke("Print hello world"), + model.invoke("Print hello world"), + ]); + }); + + test("Token usage", async () => { + process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false"; + try { + const tokenUsage: TokenUsage = { + generated_token_count: 0, + input_token_count: 0, + }; + const model = new WatsonxLLM({ + maxConcurrency: 1, + version: "2024-05-31", + max_new_tokens: 1, + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID, + callbacks: CallbackManager.fromHandlers({ + async handleLLMEnd(output: LLMResult) { + const singleTokenUsage: TokenUsage | undefined = + output.llmOutput?.tokenUsage; + if (singleTokenUsage) { + tokenUsage.generated_token_count += + singleTokenUsage.generated_token_count; + tokenUsage.input_token_count += + singleTokenUsage.input_token_count; + } + }, + }), + }); + await model.invoke("Hello"); + expect(tokenUsage.generated_token_count).toBe(1); + expect(tokenUsage.input_token_count).toBe(1); + } finally { + process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground; + } + }); + + test("Streaming mode", async () => { + let countedTokens = 0; + let streamedText = ""; + let usedTokens = 0; + const model = new WatsonxLLM({ + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID, + max_new_tokens: 5, + streaming: true, + + callbacks: CallbackManager.fromHandlers({ + async handleLLMEnd(output) { + usedTokens = output.llmOutput?.tokenUsage.generated_token_count; + }, + async handleLLMNewToken(token: string) { + countedTokens += 1; + streamedText += token; + }, + }), + }); + + const res = await model.invoke(" Print hello world?"); + expect(countedTokens).toBe(usedTokens); + expect(res).toBe(streamedText); + }); + }); + + describe("Test generate methods", () => { + test("Basic usage", async () => { + const model = new WatsonxLLM({ + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID, + max_new_tokens: 5, + }); + const res = await model.generate([ + "Print hello world!", + "Print hello universe!", + ]); + expect(res.generations.length).toBe(2); + }); + + test("Stop", async () => { + const model = new WatsonxLLM({ + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID, + max_new_tokens: 100, + }); + + const res = await model.generate( + ["Print hello world!", "Print hello world hello!"], + { + stop: ["Hello"], + } + ); + + expect( + res.generations + .map((generation) => generation.map((item) => item.text)) + .join("") + .indexOf("world") + ).toBe(-1); + }); + + test("Streaming mode with multiple prompts", async () => { + const nrNewTokens = [0, 0, 0]; + const completions = ["", "", ""]; + const model = new WatsonxLLM({ + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID, + max_new_tokens: 5, + streaming: true, + callbacks: CallbackManager.fromHandlers({ + async handleLLMNewToken(token: string, idx) { + nrNewTokens[idx.prompt] += 1; + completions[idx.prompt] += token; + }, + }), + }); + const res = await 
model.generate([ + "Print hello world!", + "Print hello otter!", + "Print hello butter!", + ]); + res.generations.forEach((generation, index) => { + generation.forEach((g) => { + expect(g.generationInfo?.generated_token_count).toBe( + nrNewTokens[index] + ); + }); + }); + nrNewTokens.forEach((tokens) => expect(tokens > 0).toBe(true)); + expect(res.generations.length).toBe(3); + }); + + test("Prompt value", async () => { + const model = new WatsonxLLM({ + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID, + max_new_tokens: 5, + }); + const res = await model.generatePrompt([ + new StringPromptValue("Print hello world!"), + ]); + for (const generation of res.generations) { + expect(generation.length).toBe(1); + } + }); + }); + + describe("Test stream method", () => { + test("Basic usage", async () => { + let countedTokens = 0; + let streamedText = ""; + const model = new WatsonxLLM({ + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID, + max_new_tokens: 100, + callbacks: CallbackManager.fromHandlers({ + async handleLLMNewToken(token: string) { + countedTokens += 1; + streamedText += token; + }, + }), + }); + const stream = await model.stream("Print hello world."); + const chunks = []; + for await (const chunk of stream) { + chunks.push(chunk); + } + expect(chunks.length).toBeGreaterThan(1); + expect(chunks.join("")).toBe(streamedText); + }); + + test("Stop", async () => { + const model = new WatsonxLLM({ + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID, + max_new_tokens: 100, + }); + + const stream = await model.stream("Print hello world!", { + stop: ["Hello"], + }); + const chunks = []; + for await (const chunk of stream) { + chunks.push(chunk); + } + expect(chunks.join("").indexOf("world")).toBe(-1); + }); + + test("Timeout", async () => { + const model = new WatsonxLLM({ + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID, + max_new_tokens: 1000, + }); + await expect(async () => { + const stream = await model.stream( + "How is your day going? Be precise and tell me a lot about it.", + { + signal: AbortSignal.timeout(750), + } + ); + const chunks = []; + for await (const chunk of stream) { + chunks.push(chunk); + } + }).rejects.toThrowError(); + }); + + test("Signal in call options", async () => { + const model = new WatsonxLLM({ + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID, + max_new_tokens: 1000, + }); + const controller = new AbortController(); + await expect(async () => { + const stream = await model.stream( + "How is your day going?
Be precise and tell me a lot about it", + { + signal: controller.signal, + } + ); + const chunks = []; + let i = 0; + for await (const chunk of stream) { + i += 1; + chunks.push(chunk); + if (i === 5) { + controller.abort(); + } + } + }).rejects.toThrowError(); + }); + }); + + describe("Test getNumToken method", () => { + test("Passing correct value", async () => { + const testProps: WatsonxInputLLM = { + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID, + }; + const instance = new WatsonxLLM({ + ...testProps, + }); + await expect( + instance.getNumTokens("Hello") + ).resolves.toBeGreaterThanOrEqual(0); + await expect( + instance.getNumTokens("Hello", { return_tokens: true }) + ).resolves.toBeGreaterThanOrEqual(0); + }); + + test("Passing wrong value", async () => { + const testProps: WatsonxInputLLM = { + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID, + maxRetries: 3, + }; + const instance = new WatsonxLLM({ + ...testProps, + }); + + // @ts-expect-error Intentionally passing wrong parameter + await expect(instance.getNumTokens(12)).rejects.toThrowError(); + await expect( + // @ts-expect-error Intentionally passing wrong parameter + instance.getNumTokens(12, { wrong: "Wrong" }) + ).rejects.toThrowError(); + }); + }); +}); diff --git a/libs/langchain-community/src/llms/tests/ibm.test.ts b/libs/langchain-community/src/llms/tests/ibm.test.ts new file mode 100644 index 000000000000..715b85e0cd4e --- /dev/null +++ b/libs/langchain-community/src/llms/tests/ibm.test.ts @@ -0,0 +1,207 @@ +/* eslint-disable no-process-env */ +import WatsonxAiMlVml_v1 from "@ibm-cloud/watsonx-ai/dist/watsonx-ai-ml/vml_v1.js"; +import { WatsonxLLM, WatsonxInputLLM } from "../ibm.js"; +import { authenticateAndSetInstance } from "../../utils/ibm.js"; +import { + WatsonxEmbeddings, + WatsonxEmbeddingsParams, +} from "../../embeddings/ibm.js"; + +const fakeAuthProp = { + watsonxAIAuthType: "iam", + watsonxAIApikey: "fake_key", +}; +export function getKey(key: K): K { + return key; +} +export const testProperties = ( + instance: WatsonxLLM | WatsonxEmbeddings, + testProps: WatsonxInputLLM, + notExTestProps?: { [key: string]: any } +) => { + const checkProperty = ( + testProps: T, + instance: T, + existing = true + ) => { + Object.keys(testProps).forEach((key) => { + const keys = getKey(key); + type Type = Pick; + + if (typeof testProps[key as keyof T] === "object") + checkProperty(testProps[key as keyof T], instance[key], existing); + else { + if (existing) + expect(instance[key as keyof T]).toBe(testProps[key as keyof T]); + else if (instance) expect(instance[key as keyof T]).toBeUndefined(); + } + }); + }; + checkProperty(testProps, instance); + if (notExTestProps) + checkProperty(notExTestProps, instance, false); +}; + +describe("LLM unit tests", () => { + describe("Positive tests", () => { + test("Test authentication function", () => { + const instance = authenticateAndSetInstance({ + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + ...fakeAuthProp, + }); + expect(instance).toBeInstanceOf(WatsonxAiMlVml_v1); + }); + + test("Test basic properties after init", async () => { + const testProps = { + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID || "testString", + }; + const instance = new WatsonxLLM({ ...testProps, ...fakeAuthProp }); + + 
testProperties(instance, testProps); + }); + + test("Test methods after init", () => { + const testProps: WatsonxInputLLM = { + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID || "testString", + }; + const instance = new WatsonxLLM({ + ...testProps, + ...fakeAuthProp, + }); + expect(instance.getNumTokens).toBeDefined(); + expect(instance._generate).toBeDefined(); + expect(instance._streamResponseChunks).toBeDefined(); + expect(instance.invocationParams).toBeDefined(); + }); + + test("Test properties after init", async () => { + const testProps = { + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID || "testString", + modelId: "ibm/granite-13b-chat-v2", + max_new_tokens: 100, + decoding_method: "sample", + length_penalty: { decay_factor: 1, start_index: 1 }, + min_new_tokens: 10, + random_seed: 1, + stop_sequences: ["hello"], + temperature: 0.1, + time_limit: 10000, + top_k: 1, + top_p: 1, + repetition_penalty: 1, + truncate_input_tokens: 1, + return_options: { + input_text: true, + generated_tokens: true, + input_tokens: true, + token_logprobs: true, + token_ranks: true, + + top_n_tokens: 2, + }, + include_stop_sequence: false, + maxRetries: 3, + maxConcurrency: 3, + }; + const instance = new WatsonxLLM({ ...testProps, ...fakeAuthProp }); + + testProperties(instance, testProps); + }); + }); + + describe("Negative tests", () => { + test("Missing id", async () => { + const testProps: WatsonxInputLLM = { + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + }; + expect( + () => + new WatsonxLLM({ + ...testProps, + ...fakeAuthProp, + }) + ).toThrowError(); + }); + + test("Missing other props", async () => { + // @ts-expect-error Intentionally passing not enough parameters + const testPropsProjectId: WatsonxInputLLM = { + projectId: process.env.WATSONX_AI_PROJECT_ID || "testString", + }; + + expect( + () => + new WatsonxLLM({ + ...testPropsProjectId, + ...fakeAuthProp, + }) + ).toThrowError(); + // @ts-expect-error Intentionally passing not enough parameters + const testPropsServiceUrl: WatsonxInputLLM = { + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + }; + expect( + () => + new WatsonxLLM({ + ...testPropsServiceUrl, + ...fakeAuthProp, + }) + ).toThrowError(); + const testPropsVersion = { + version: "2024-05-31", + }; + expect( + () => + new WatsonxLLM({ + // @ts-expect-error Intentionally passing wrong type of an object + testPropsVersion, + }) + ).toThrowError(); + }); + + test("Passing more than one id", async () => { + const testProps: WatsonxInputLLM = { + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID || "testString", + spaceId: process.env.WATSONX_AI_PROJECT_ID || "testString", + }; + expect( + () => + new WatsonxLLM({ + ...testProps, + ...fakeAuthProp, + }) + ).toThrowError(); + }); + + test("Not existing property passed", async () => { + const testProps = { + version: "2024-05-31", + serviceUrl: process.env.WATSONX_AI_SERVICE_URL as string, + projectId: process.env.WATSONX_AI_PROJECT_ID || "testString", + }; + const notExTestProps = { + notExisting: 12, + notExObj: { + notExProp: 12, + }, + }; + const instance = new WatsonxLLM({ + ...testProps, + ...notExTestProps, + ...fakeAuthProp, + }); + testProperties(instance, testProps, notExTestProps); + }); + }); +}); diff --git 
a/libs/langchain-community/src/llms/watsonx_ai.ts b/libs/langchain-community/src/llms/watsonx_ai.ts index 2da98bb86bd7..ffe75459b661 100644 --- a/libs/langchain-community/src/llms/watsonx_ai.ts +++ b/libs/langchain-community/src/llms/watsonx_ai.ts @@ -1,3 +1,4 @@ +/** @deprecated Use "@langchain/community/llms/ibm" instead. */ import { type BaseLLMCallOptions, type BaseLLMParams, diff --git a/libs/langchain-community/src/load/import_constants.ts b/libs/langchain-community/src/load/import_constants.ts index 3044b499e239..3698a20e7809 100644 --- a/libs/langchain-community/src/load/import_constants.ts +++ b/libs/langchain-community/src/load/import_constants.ts @@ -14,6 +14,7 @@ export const optionalImportEntrypoints: string[] = [ "langchain_community/embeddings/gradient_ai", "langchain_community/embeddings/hf", "langchain_community/embeddings/hf_transformers", + "langchain_community/embeddings/ibm", "langchain_community/embeddings/jina", "langchain_community/embeddings/llama_cpp", "langchain_community/embeddings/premai", @@ -27,6 +28,7 @@ export const optionalImportEntrypoints: string[] = [ "langchain_community/llms/cohere", "langchain_community/llms/gradient_ai", "langchain_community/llms/hf", + "langchain_community/llms/ibm", "langchain_community/llms/llama_cpp", "langchain_community/llms/portkey", "langchain_community/llms/raycast", diff --git a/libs/langchain-community/src/load/import_type.ts b/libs/langchain-community/src/load/import_type.ts index 00f057aaa6a3..51536706142c 100644 --- a/libs/langchain-community/src/load/import_type.ts +++ b/libs/langchain-community/src/load/import_type.ts @@ -4,6 +4,7 @@ export interface OptionalImportMap {} export interface SecretMap { ALIBABA_API_KEY?: string; + AUTHENTICATOR?: string; AWS_ACCESS_KEY_ID?: string; AWS_SECRETE_ACCESS_KEY?: string; AWS_SECRET_ACCESS_KEY?: string; @@ -60,6 +61,12 @@ export interface SecretMap { VECTARA_API_KEY?: string; VECTARA_CORPUS_ID?: string; VECTARA_CUSTOMER_ID?: string; + WATSONX_AI_APIKEY?: string; + WATSONX_AI_AUTH_TYPE?: string; + WATSONX_AI_BEARER_TOKEN?: string; + WATSONX_AI_PASSWORD?: string; + WATSONX_AI_URL?: string; + WATSONX_AI_USERNAME?: string; WATSONX_PROJECT_ID?: string; WRITER_API_KEY?: string; WRITER_ORG_ID?: string; diff --git a/libs/langchain-community/src/types/watsonx_ai.ts b/libs/langchain-community/src/types/watsonx_ai.ts new file mode 100644 index 000000000000..4b7cfa1dda8f --- /dev/null +++ b/libs/langchain-community/src/types/watsonx_ai.ts @@ -0,0 +1,45 @@ +export interface TokenUsage { + generated_token_count: number; + input_token_count: number; +} +export interface WatsonxAuth { + watsonxAIApikey?: string; + watsonxAIBearerToken?: string; + watsonxAIUsername?: string; + watsonxAIPassword?: string; + watsonxAIUrl?: string; + watsonxAIAuthType?: string; +} + +export interface WatsonxInit { + authenticator?: string; + serviceUrl: string; + version: string; +} + +export interface WatsonxParams extends WatsonxInit { + modelId?: string; + spaceId?: string; + projectId?: string; + idOrName?: string; + maxConcurrency?: number; + maxRetries?: number; +} + +export interface GenerationInfo { + text: string; + stop_reason: string | undefined; + generated_token_count: number; + input_token_count: number; +} + +export interface ResponseChunk { + id: number; + event: string; + data: { + results: (TokenUsage & { + stop_reason?: string; + generated_text: string; + })[]; + }; +} diff --git a/libs/langchain-community/src/utils/ibm.ts b/libs/langchain-community/src/utils/ibm.ts new file mode 100644 index
--- /dev/null
+++ b/libs/langchain-community/src/utils/ibm.ts
@@ -0,0 +1,53 @@
+import { WatsonXAI } from "@ibm-cloud/watsonx-ai";
+import {
+  IamAuthenticator,
+  BearerTokenAuthenticator,
+  CloudPakForDataAuthenticator,
+} from "ibm-cloud-sdk-core";
+import { WatsonxAuth, WatsonxInit } from "../types/watsonx_ai.js";
+
+export const authenticateAndSetInstance = ({
+  watsonxAIApikey,
+  watsonxAIAuthType,
+  watsonxAIBearerToken,
+  watsonxAIUsername,
+  watsonxAIPassword,
+  watsonxAIUrl,
+  version,
+  serviceUrl,
+}: WatsonxAuth & Omit<WatsonxInit, "authenticator">): WatsonXAI | undefined => {
+  if (watsonxAIAuthType === "iam" && watsonxAIApikey) {
+    return WatsonXAI.newInstance({
+      version,
+      serviceUrl,
+      authenticator: new IamAuthenticator({
+        apikey: watsonxAIApikey,
+      }),
+    });
+  } else if (watsonxAIAuthType === "bearertoken" && watsonxAIBearerToken) {
+    return WatsonXAI.newInstance({
+      version,
+      serviceUrl,
+      authenticator: new BearerTokenAuthenticator({
+        bearerToken: watsonxAIBearerToken,
+      }),
+    });
+  } else if (watsonxAIAuthType === "cp4d" && watsonxAIUrl) {
+    if (watsonxAIUsername && watsonxAIPassword && watsonxAIApikey)
+      return WatsonXAI.newInstance({
+        version,
+        serviceUrl,
+        authenticator: new CloudPakForDataAuthenticator({
+          username: watsonxAIUsername,
+          password: watsonxAIPassword,
+          url: watsonxAIUrl,
+          apikey: watsonxAIApikey,
+        }),
+      });
+  } else
+    return WatsonXAI.newInstance({
+      version,
+      serviceUrl,
+    });
+  return undefined;
+};
diff --git a/yarn.lock b/yarn.lock
index 30f3d3bbe23b..2d06c2a21b59 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -10495,6 +10495,17 @@ __metadata:
   languageName: node
   linkType: hard
 
+"@ibm-cloud/watsonx-ai@npm:1.0.1":
+  version: 1.0.1
+  resolution: "@ibm-cloud/watsonx-ai@npm:1.0.1"
+  dependencies:
+    "@types/node": ^12.0.8
+    extend: 3.0.2
+    ibm-cloud-sdk-core: ^4.2.5
+  checksum: 843aa748f2568e5850df7718c38897dd668bc560426a339974cbd481ddf04579a54d661aa16b84d29c2b4d8aab0925ca3fbe78c31beb82e7cdc9ca02bd7a4e07
+  languageName: node
+  linkType: hard
+
 "@inquirer/figures@npm:^1.0.3":
   version: 1.0.5
   resolution: "@inquirer/figures@npm:1.0.5"
@@ -11463,6 +11474,7 @@ __metadata:
     "@google-cloud/storage": ^7.7.0
     "@gradientai/nodejs-sdk": ^1.2.0
     "@huggingface/inference": ^2.6.4
+    "@ibm-cloud/watsonx-ai": 1.0.1
     "@jest/globals": ^29.5.0
     "@langchain/core": "workspace:*"
     "@langchain/openai": ">=0.2.0 <0.4.0"
@@ -11559,6 +11571,7 @@ __metadata:
     hdb: 0.19.8
     hnswlib-node: ^3.0.0
     html-to-text: ^9.0.5
+    ibm-cloud-sdk-core: ^5.0.2
     ignore: ^5.2.0
     interface-datastore: ^8.2.11
     ioredis: ^5.3.2
@@ -18574,6 +18587,15 @@ __metadata:
   languageName: node
   linkType: hard
 
+"@types/debug@npm:^4.1.12":
+  version: 4.1.12
+  resolution: "@types/debug@npm:4.1.12"
+  dependencies:
+    "@types/ms": "*"
+  checksum: 47876a852de8240bfdaf7481357af2b88cb660d30c72e73789abf00c499d6bc7cd5e52f41c915d1b9cd8ec9fef5b05688d7b7aef17f7f272c2d04679508d1053
+  languageName: node
+  linkType: hard
+
 "@types/decamelize@npm:^1.2.0":
   version: 1.2.0
   resolution: "@types/decamelize@npm:1.2.0"
@@ -18986,6 +19008,13 @@ __metadata:
   languageName: node
   linkType: hard
 
+"@types/ms@npm:*":
+  version: 0.7.34
+  resolution: "@types/ms@npm:0.7.34"
+  checksum: f38d36e7b6edecd9badc9cf50474159e9da5fa6965a75186cceaf883278611b9df6669dc3a3cc122b7938d317b68a9e3d573d316fcb35d1be47ec9e468c6bd8a
+  languageName: node
+  linkType: hard
+
 "@types/mustache@npm:^4":
   version: 4.2.5
   resolution: "@types/mustache@npm:4.2.5"
@@ -19053,6 +19082,13 @@ __metadata:
   languageName: node
   linkType: hard
 
+"@types/node@npm:^12.0.8":
+  version: 12.20.55
"@types/node@npm:12.20.55" + checksum: e4f86785f4092706e0d3b0edff8dca5a13b45627e4b36700acd8dfe6ad53db71928c8dee914d4276c7fd3b6ccd829aa919811c9eb708a2c8e4c6eb3701178c37 + languageName: node + linkType: hard + "@types/node@npm:^17.0.5": version: 17.0.45 resolution: "@types/node@npm:17.0.45" @@ -19108,6 +19144,13 @@ __metadata: languageName: node linkType: hard +"@types/node@npm:~10.14.19": + version: 10.14.22 + resolution: "@types/node@npm:10.14.22" + checksum: 5dc12f9f284afe195584bfa553b3bd46828f0f568e1a349dcd7e6357d81fa82f8b3cd454f375b7478ad8e9b93a6e3341f102960ead4619829e5e82ea2bd8d204 + languageName: node + linkType: hard + "@types/offscreencanvas@npm:~2019.3.0": version: 2019.3.0 resolution: "@types/offscreencanvas@npm:2019.3.0" @@ -21319,6 +21362,17 @@ __metadata: languageName: node linkType: hard +"axios@npm:1.7.4": + version: 1.7.4 + resolution: "axios@npm:1.7.4" + dependencies: + follow-redirects: ^1.15.6 + form-data: ^4.0.0 + proxy-from-env: ^1.1.0 + checksum: 0c17039a9acfe6a566fca8431ba5c1b455c83d30ea6157fec68a6722878fcd30f3bd32d172f6bee0c51fe75ca98e6414ddcd968a87b5606b573731629440bfaf + languageName: node + linkType: hard + "axios@npm:^0.25.0": version: 0.25.0 resolution: "axios@npm:0.25.0" @@ -21381,6 +21435,17 @@ __metadata: languageName: node linkType: hard +"axios@npm:^1.7.5": + version: 1.7.7 + resolution: "axios@npm:1.7.7" + dependencies: + follow-redirects: ^1.15.6 + form-data: ^4.0.0 + proxy-from-env: ^1.1.0 + checksum: 882d4fe0ec694a07c7f5c1f68205eb6dc5a62aecdb632cc7a4a3d0985188ce3030e0b277e1a8260ac3f194d314ae342117660a151fabffdc5081ca0b5a8b47fe + languageName: node + linkType: hard + "axobject-query@npm:^3.1.1, axobject-query@npm:^3.2.1": version: 3.2.1 resolution: "axobject-query@npm:3.2.1" @@ -22304,7 +22369,7 @@ __metadata: languageName: node linkType: hard -"camelcase@npm:6, camelcase@npm:^6.2.0": +"camelcase@npm:6, camelcase@npm:^6.2.0, camelcase@npm:^6.3.0": version: 6.3.0 resolution: "camelcase@npm:6.3.0" checksum: 8c96818a9076434998511251dcb2761a94817ea17dbdc37f47ac080bd088fc62c7369429a19e2178b993497132c8cbcf5cc1f44ba963e76782ba469c0474938d @@ -25050,6 +25115,13 @@ __metadata: languageName: node linkType: hard +"diff-sequences@npm:^27.5.1": + version: 27.5.1 + resolution: "diff-sequences@npm:27.5.1" + checksum: a00db5554c9da7da225db2d2638d85f8e41124eccbd56cbaefb3b276dcbb1c1c2ad851c32defe2055a54a4806f030656cbf6638105fd6ce97bb87b90b32a33ca + languageName: node + linkType: hard + "diff-sequences@npm:^29.4.3": version: 29.4.3 resolution: "diff-sequences@npm:29.4.3" @@ -27272,6 +27344,18 @@ __metadata: languageName: node linkType: hard +"expect@npm:^27.5.1": + version: 27.5.1 + resolution: "expect@npm:27.5.1" + dependencies: + "@jest/types": ^27.5.1 + jest-get-type: ^27.5.1 + jest-matcher-utils: ^27.5.1 + jest-message-util: ^27.5.1 + checksum: b2c66beb52de53ef1872165aace40224e722bca3c2274c54cfa74b6d617d55cf0ccdbf36783ccd64dbea501b280098ed33fd0b207d4f15bc03cd3c7a24364a6a + languageName: node + linkType: hard + "expect@npm:^29.0.0": version: 29.6.1 resolution: "expect@npm:29.6.1" @@ -27422,7 +27506,7 @@ __metadata: languageName: node linkType: hard -"extend@npm:^3.0.0, extend@npm:^3.0.2": +"extend@npm:3.0.2, extend@npm:^3.0.0, extend@npm:^3.0.2": version: 3.0.2 resolution: "extend@npm:3.0.2" checksum: a50a8309ca65ea5d426382ff09f33586527882cf532931cb08ca786ea3146c0553310bda688710ff61d7668eba9f96b923fe1420cdf56a2c3eaf30fcab87b515 @@ -27765,7 +27849,7 @@ __metadata: languageName: node linkType: hard -"file-type@npm:^16.5.4": +"file-type@npm:16.5.4, file-type@npm:^16.5.4": 
   version: 16.5.4
   resolution: "file-type@npm:16.5.4"
   dependencies:
@@ -29930,6 +30014,53 @@ __metadata:
   languageName: node
   linkType: hard
 
+"ibm-cloud-sdk-core@npm:^4.2.5":
+  version: 4.3.4
+  resolution: "ibm-cloud-sdk-core@npm:4.3.4"
+  dependencies:
+    "@types/debug": ^4.1.12
+    "@types/node": ~10.14.19
+    "@types/tough-cookie": ^4.0.0
+    axios: ^1.7.5
+    camelcase: ^6.3.0
+    debug: ^4.3.4
+    dotenv: ^16.4.5
+    expect: ^27.5.1
+    extend: 3.0.2
+    file-type: 16.5.4
+    form-data: 4.0.0
+    isstream: 0.1.2
+    jsonwebtoken: ^9.0.2
+    mime-types: 2.1.35
+    retry-axios: ^2.6.0
+    tough-cookie: ^4.1.3
+  checksum: 27d6bd692cde66766a7cea36e75d53a6a089e2b2b726cf86108ab48f9d452bb6d6a01324d2160e3bb54df7750240129bae989934ab2fd80c0950ecdb5bfc07b3
+  languageName: node
+  linkType: hard
+
+"ibm-cloud-sdk-core@npm:^5.0.2":
+  version: 5.0.2
+  resolution: "ibm-cloud-sdk-core@npm:5.0.2"
+  dependencies:
+    "@types/debug": ^4.1.12
+    "@types/node": ~10.14.19
+    "@types/tough-cookie": ^4.0.0
+    axios: 1.7.4
+    camelcase: ^6.3.0
+    debug: ^4.3.4
+    dotenv: ^16.4.5
+    extend: 3.0.2
+    file-type: 16.5.4
+    form-data: 4.0.0
+    isstream: 0.1.2
+    jsonwebtoken: ^9.0.2
+    mime-types: 2.1.35
+    retry-axios: ^2.6.0
+    tough-cookie: ^4.1.3
+  checksum: fed92b851f816cbe94f4f28c6b45eed3b214f570897ed9936e5b7fd332b8c25c599f49d96866b9d936499b39a65e4d9db5a3191940b5a2489656e966e8fa6526
+  languageName: node
+  linkType: hard
+
 "iconv-lite@npm:0.4, iconv-lite@npm:0.4.24, iconv-lite@npm:^0.4.18, iconv-lite@npm:^0.4.24":
   version: 0.4.24
   resolution: "iconv-lite@npm:0.4.24"
@@ -31086,6 +31217,13 @@ __metadata:
   languageName: node
   linkType: hard
 
+"isstream@npm:0.1.2":
+  version: 0.1.2
+  resolution: "isstream@npm:0.1.2"
+  checksum: 1eb2fe63a729f7bdd8a559ab552c69055f4f48eb5c2f03724430587c6f450783c8f1cd936c1c952d0a927925180fcc892ebd5b174236cf1065d4bd5bdb37e963
+  languageName: node
+  linkType: hard
+
 "issue-parser@npm:6.0.0":
   version: 6.0.0
   resolution: "issue-parser@npm:6.0.0"
@@ -31421,6 +31559,18 @@ __metadata:
   languageName: node
   linkType: hard
 
+"jest-diff@npm:^27.5.1":
+  version: 27.5.1
+  resolution: "jest-diff@npm:27.5.1"
+  dependencies:
+    chalk: ^4.0.0
+    diff-sequences: ^27.5.1
+    jest-get-type: ^27.5.1
+    pretty-format: ^27.5.1
+  checksum: 8be27c1e1ee57b2bb2bef9c0b233c19621b4c43d53a3c26e2c00a4e805eb4ea11fe1694a06a9fb0e80ffdcfdc0d2b1cb0b85920b3f5c892327ecd1e7bd96b865
+  languageName: node
+  linkType: hard
+
 "jest-diff@npm:^29.5.0":
   version: 29.5.0
   resolution: "jest-diff@npm:29.5.0"
@@ -31507,6 +31657,13 @@ __metadata:
   languageName: node
   linkType: hard
 
+"jest-get-type@npm:^27.5.1":
+  version: 27.5.1
+  resolution: "jest-get-type@npm:27.5.1"
+  checksum: 63064ab70195c21007d897c1157bf88ff94a790824a10f8c890392e7d17eda9c3900513cb291ca1c8d5722cad79169764e9a1279f7c8a9c4cd6e9109ff04bbc0
+  languageName: node
+  linkType: hard
+
 "jest-get-type@npm:^29.4.3":
   version: 29.4.3
   resolution: "jest-get-type@npm:29.4.3"
@@ -31577,6 +31734,18 @@ __metadata:
   languageName: node
   linkType: hard
 
+"jest-matcher-utils@npm:^27.5.1":
+  version: 27.5.1
+  resolution: "jest-matcher-utils@npm:27.5.1"
+  dependencies:
+    chalk: ^4.0.0
+    jest-diff: ^27.5.1
+    jest-get-type: ^27.5.1
+    pretty-format: ^27.5.1
+  checksum: bb2135fc48889ff3fe73888f6cc7168ddab9de28b51b3148f820c89fdfd2effdcad005f18be67d0b9be80eda208ad47290f62f03d0a33f848db2dd0273c8217a
+  languageName: node
+  linkType: hard
+
 "jest-matcher-utils@npm:^29.5.0":
   version: 29.5.0
   resolution: "jest-matcher-utils@npm:29.5.0"
@@ -31613,6 +31782,23 @@ __metadata:
   languageName: node
   linkType: hard
 
+"jest-message-util@npm:^27.5.1":
+  version: 27.5.1
"jest-message-util@npm:27.5.1" + dependencies: + "@babel/code-frame": ^7.12.13 + "@jest/types": ^27.5.1 + "@types/stack-utils": ^2.0.0 + chalk: ^4.0.0 + graceful-fs: ^4.2.9 + micromatch: ^4.0.4 + pretty-format: ^27.5.1 + slash: ^3.0.0 + stack-utils: ^2.0.3 + checksum: eb6d637d1411c71646de578c49826b6da8e33dd293e501967011de9d1916d53d845afbfb52a5b661ff1c495be7c13f751c48c7f30781fd94fbd64842e8195796 + languageName: node + linkType: hard + "jest-message-util@npm:^29.5.0": version: 29.5.0 resolution: "jest-message-util@npm:29.5.0" @@ -37038,6 +37224,17 @@ __metadata: languageName: node linkType: hard +"pretty-format@npm:^27.5.1": + version: 27.5.1 + resolution: "pretty-format@npm:27.5.1" + dependencies: + ansi-regex: ^5.0.1 + ansi-styles: ^5.0.0 + react-is: ^17.0.1 + checksum: cf610cffcb793885d16f184a62162f2dd0df31642d9a18edf4ca298e909a8fe80bdbf556d5c9573992c102ce8bf948691da91bf9739bee0ffb6e79c8a8a6e088 + languageName: node + linkType: hard + "pretty-format@npm:^29.0.0, pretty-format@npm:^29.6.1": version: 29.6.1 resolution: "pretty-format@npm:29.6.1" @@ -37711,6 +37908,13 @@ __metadata: languageName: node linkType: hard +"react-is@npm:^17.0.1": + version: 17.0.2 + resolution: "react-is@npm:17.0.2" + checksum: 9d6d111d8990dc98bc5402c1266a808b0459b5d54830bbea24c12d908b536df7883f268a7868cfaedde3dd9d4e0d574db456f84d2e6df9c4526f99bb4b5344d8 + languageName: node + linkType: hard + "react-is@npm:^18.0.0": version: 18.2.0 resolution: "react-is@npm:18.2.0" @@ -38563,6 +38767,15 @@ __metadata: languageName: node linkType: hard +"retry-axios@npm:^2.6.0": + version: 2.6.0 + resolution: "retry-axios@npm:2.6.0" + peerDependencies: + axios: "*" + checksum: cf7e63d89f00ead2633e60f00b504ec10217db8165327879b6feb0fa787fffe687d06ee145b2f43d2b4ea8916d42c951d34ee32ee1ea47c9d0b602d4963bd7f9 + languageName: node + linkType: hard + "retry-request@npm:^7.0.0": version: 7.0.2 resolution: "retry-request@npm:7.0.2"