feat: pass KurtSamplingOptions to the underlying LLM provider
This is a follow-up to PR #24, which added `KurtSamplingOptions` to the `Kurt` class.

With this PR, the LLM-specific adapters (`KurtOpenAI` and `KurtVertexAI`) now pass these
options along to the underlying LLM provider, adapted appropriately for each provider's API.

This commit also adds tests that demonstrate these options being passed through.
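
For illustration, here is a minimal sketch of how a caller supplies these options. The `generateNaturalLanguage` call and the `sampling` shape are taken from the specs in this diff; the `Kurt`/`KurtOpenAI` construction, the `@formula-monks/kurt-open-ai` package name, and the stream consumption are assumptions, not verbatim from this commit:

```ts
import { Kurt } from "@formula-monks/kurt"
import { KurtOpenAI } from "@formula-monks/kurt-open-ai" // assumed package name
import { OpenAI } from "openai"

// Assumed construction (not part of this diff): a Kurt instance
// wrapping the OpenAI adapter.
const kurt = new Kurt(
  new KurtOpenAI({ openAI: new OpenAI(), model: "gpt-3.5-turbo-0125" })
)

// Per-call sampling options, as exercised by the new specs in this commit.
const stream = kurt.generateNaturalLanguage({
  prompt: "Compose a haiku about a mountain stream at night.",
  sampling: {
    maxOutputTokens: 100, // sent as max_tokens (OpenAI) / maxOutputTokens (Vertex AI)
    temperature: 1.0,
    topP: 1.0, // sent as top_p (OpenAI) / topP (Vertex AI)
  },
})

// The adapters stream events back (see step3KurtEvents in the snapshots).
for await (const event of stream) console.log(event)
```

When `sampling` is omitted, the updated snapshots below show the defaults that `Kurt` fills in before calling the adapter: `maxOutputTokens: 4096`, `temperature: 0.5`, and `topP: 0.95`.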
jemc committed May 30, 2024
1 parent bc639d5 commit 2e4be5d
Showing 16 changed files with 273 additions and 1 deletion.
20 changes: 20 additions & 0 deletions packages/kurt-open-ai/spec/generateNaturalLanguage.spec.ts
@@ -10,4 +10,24 @@ describe("KurtOpenAI generateNaturalLanguage", () => {
)
expect(result.text).toEqual("Hello! How can I assist you today?")
})

test("writes a haiku with high temperature", async () => {
const result = await snapshotAndMock((kurt) =>
kurt.generateNaturalLanguage({
prompt: "Compose a haiku about a mountain stream at night.",
sampling: {
maxOutputTokens: 100,
temperature: 1.0,
topP: 1.0,
},
})
)
expect(result.text).toEqual(
[
"Shaft of moonlight glows",
"Mountain stream flows silently",
"Nature's lullaby",
].join("\n")
)
})
})
@@ -1,6 +1,9 @@
step1Request:
stream: true
model: gpt-3.5-turbo-0125
max_tokens: 4096
temperature: 0.5
top_p: 0.95
messages:
- role: user
content: Say hello!
@@ -0,0 +1,134 @@
step1Request:
stream: true
model: gpt-3.5-turbo-0125
max_tokens: 100
temperature: 1
top_p: 1
messages:
- role: user
content: Compose a haiku about a mountain stream at night.
step2RawChunks:
- index: 0
delta:
role: assistant
content: ""
logprobs: null
finish_reason: null
- index: 0
delta:
content: Sha
logprobs: null
finish_reason: null
- index: 0
delta:
content: ft
logprobs: null
finish_reason: null
- index: 0
delta:
content: " of"
logprobs: null
finish_reason: null
- index: 0
delta:
content: " moon"
logprobs: null
finish_reason: null
- index: 0
delta:
content: light
logprobs: null
finish_reason: null
- index: 0
delta:
content: " gl"
logprobs: null
finish_reason: null
- index: 0
delta:
content: ows
logprobs: null
finish_reason: null
- index: 0
delta:
content: "\n"
logprobs: null
finish_reason: null
- index: 0
delta:
content: Mountain
logprobs: null
finish_reason: null
- index: 0
delta:
content: " stream"
logprobs: null
finish_reason: null
- index: 0
delta:
content: " flows"
logprobs: null
finish_reason: null
- index: 0
delta:
content: " silently"
logprobs: null
finish_reason: null
- index: 0
delta:
content: "\n"
logprobs: null
finish_reason: null
- index: 0
delta:
content: Nature
logprobs: null
finish_reason: null
- index: 0
delta:
content: "'s"
logprobs: null
finish_reason: null
- index: 0
delta:
content: " l"
logprobs: null
finish_reason: null
- index: 0
delta:
content: ull
logprobs: null
finish_reason: null
- index: 0
delta:
content: aby
logprobs: null
finish_reason: null
- index: 0
delta: {}
logprobs: null
finish_reason: stop
step3KurtEvents:
- chunk: Sha
- chunk: ft
- chunk: " of"
- chunk: " moon"
- chunk: light
- chunk: " gl"
- chunk: ows
- chunk: "\n"
- chunk: Mountain
- chunk: " stream"
- chunk: " flows"
- chunk: " silently"
- chunk: "\n"
- chunk: Nature
- chunk: "'s"
- chunk: " l"
- chunk: ull
- chunk: aby
- finished: true
text: |-
Shaft of moonlight glows
Mountain stream flows silently
Nature's lullaby
@@ -1,6 +1,9 @@
step1Request:
stream: true
model: gpt-3.5-turbo-0125
max_tokens: 4096
temperature: 0.5
top_p: 0.95
messages:
- role: user
content: Say hello!
@@ -1,6 +1,9 @@
step1Request:
stream: true
model: gpt-3.5-turbo-0125
max_tokens: 4096
temperature: 0.5
top_p: 0.95
messages:
- role: user
content: What's 9876356 divided by 30487, rounded to the nearest integer?
@@ -1,6 +1,9 @@
step1Request:
stream: true
model: gpt-3.5-turbo-0125
max_tokens: 4096
temperature: 0.5
top_p: 0.95
messages:
- role: user
content: What's 9876356 divided by 30487, rounded to the nearest integer?
5 changes: 5 additions & 0 deletions packages/kurt-open-ai/src/KurtOpenAI.ts
@@ -12,6 +12,7 @@ import type {
KurtSchemaResult,
KurtSchemaResultMaybe,
KurtSchema,
KurtSamplingOptions,
} from "@formula-monks/kurt"
import type {
OpenAI,
@@ -83,12 +84,16 @@ export class KurtOpenAI

generateRawEvents(options: {
messages: OpenAIMessage[]
sampling: Required<KurtSamplingOptions>
tools: { [key: string]: OpenAITool }
forceTool?: string
}): AsyncIterable<OpenAIResponseChunk> {
const req: OpenAIRequest = {
stream: true,
model: this.options.model,
max_tokens: options.sampling.maxOutputTokens,
temperature: options.sampling.temperature,
top_p: options.sampling.topP,
messages: options.messages,
}

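The adaptation for OpenAI shown above is purely a renaming of fields, from Kurt's camelCase option names to OpenAI's snake_case request keys. A standalone restatement of that mapping (the helper name is hypothetical; the field names come from the hunk above):

```ts
import type { KurtSamplingOptions } from "@formula-monks/kurt"

// Hypothetical helper restating the mapping applied in generateRawEvents above.
function toOpenAISamplingFields(sampling: Required<KurtSamplingOptions>) {
  return {
    max_tokens: sampling.maxOutputTokens, // maxOutputTokens -> max_tokens
    temperature: sampling.temperature, // same name on both sides
    top_p: sampling.topP, // topP -> top_p
  }
}
```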
20 changes: 20 additions & 0 deletions packages/kurt-vertex-ai/spec/generateNaturalLanguage.spec.ts
@@ -10,4 +10,24 @@ describe("KurtVertexAI generateNaturalLanguage", () => {
)
expect(result.text).toEqual("Hello! How can I assist you today?")
})

test("writes a haiku with high temperature", async () => {
const result = await snapshotAndMock((kurt) =>
kurt.generateNaturalLanguage({
prompt: "Compose a haiku about a mountain stream at night.",
sampling: {
maxOutputTokens: 100,
temperature: 1.0,
topP: 1.0,
},
})
)
expect(result.text).toEqual(
[
"Moon paints silver path,",
"Water sings to sleeping stones,",
"Night sighs on the wind.",
].join("\n")
)
})
})
@@ -1,4 +1,8 @@
step1Request:
generationConfig:
maxOutputTokens: 4096
temperature: 0.5
topP: 0.95
contents:
- role: user
parts:
@@ -0,0 +1,48 @@
step1Request:
generationConfig:
maxOutputTokens: 100
temperature: 1
topP: 1
contents:
- role: user
parts:
- text: Compose a haiku about a mountain stream at night.
step2RawChunks:
- content:
role: model
parts:
- text: Moon
index: 0
- content:
role: model
parts:
- text: |2-
paints silver path,
Water sings to sleeping stones,
Night sighs on the
index: 0
- content:
role: model
parts:
- text: |2-
wind.
index: 0
- content:
role: model
parts:
- text: ""
finishReason: STOP
index: 0
step3KurtEvents:
- chunk: Moon
- chunk: |2-
paints silver path,
Water sings to sleeping stones,
Night sighs on the
- chunk: |2-
wind.
- finished: true
text: |-
Moon paints silver path,
Water sings to sleeping stones,
Night sighs on the wind.
@@ -1,4 +1,8 @@
step1Request:
generationConfig:
maxOutputTokens: 4096
temperature: 0.5
topP: 0.95
contents:
- role: user
parts:
@@ -1,4 +1,8 @@
step1Request:
generationConfig:
maxOutputTokens: 4096
temperature: 0.5
topP: 0.95
contents:
- role: user
parts:
@@ -1,4 +1,8 @@
step1Request:
generationConfig:
maxOutputTokens: 4096
temperature: 0.5
topP: 0.95
contents:
- role: user
parts:
@@ -1,4 +1,8 @@
step1Request:
generationConfig:
maxOutputTokens: 4096
temperature: 0.5
topP: 0.95
contents:
- role: user
parts:
@@ -1,4 +1,8 @@
step1Request:
generationConfig:
maxOutputTokens: 4096
temperature: 0.5
topP: 0.95
contents:
- role: user
parts:
11 changes: 10 additions & 1 deletion packages/kurt-vertex-ai/src/KurtVertexAI.ts
@@ -14,6 +14,7 @@ import type {
KurtSchemaResult,
KurtSchemaResultMaybe,
KurtMessage,
KurtSamplingOptions,
} from "@formula-monks/kurt"
import type {
VertexAI,
@@ -74,14 +75,22 @@ export class KurtVertexAI

generateRawEvents(options: {
messages: VertexAIMessage[]
sampling: Required<KurtSamplingOptions>
tools: { [key: string]: VertexAITool }
forceTool?: string | undefined
}): AsyncIterable<VertexAIResponseChunk> {
const llm = this.options.vertexAI.getGenerativeModel({
model: this.options.model,
}) as VertexAIGenerativeModel

const req: VertexAIRequest = { contents: options.messages }
const req: VertexAIRequest = {
generationConfig: {
maxOutputTokens: options.sampling.maxOutputTokens,
temperature: options.sampling.temperature,
topP: options.sampling.topP,
},
contents: options.messages,
}

const tools = Object.values(options.tools)
if (tools.length > 0) req.tools = [{ functionDeclarations: tools }]
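Unlike OpenAI, Vertex AI's `generationConfig` uses the same camelCase names as `KurtSamplingOptions`, so the adaptation in the hunk above is a direct pass-through. A hypothetical helper making that explicit:

```ts
import type { KurtSamplingOptions } from "@formula-monks/kurt"

// Hypothetical helper: Vertex AI's generationConfig field names match
// Kurt's sampling option names exactly, so no renaming is needed.
function toVertexAIGenerationConfig(sampling: Required<KurtSamplingOptions>) {
  const { maxOutputTokens, temperature, topP } = sampling
  return { maxOutputTokens, temperature, topP }
}
```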
