feat: map vercel to same format (#358)
k11kirky authored Jan 22, 2025
1 parent b649be7 commit d4342f6
Showing 5 changed files with 104 additions and 69 deletions.
23 changes: 10 additions & 13 deletions posthog-ai/README.md
@@ -14,25 +14,22 @@ npm install @posthog/ai
import { OpenAI } from '@posthog/ai'
import { PostHog } from 'posthog-node'

const phClient = new PostHog(
'<YOUR_PROJECT_API_KEY>',
{ host: 'https://us.i.posthog.com' }
);
const phClient = new PostHog('<YOUR_PROJECT_API_KEY>', { host: 'https://us.i.posthog.com' })

const client = new OpenAI({
apiKey: '<YOUR_OPENAI_API_KEY>',
posthog: phClient,
});
})

const completion = await client.chat.completions.create({
model: "gpt-3.5-turbo",
messages: [{ role: "user", content: "Tell me a fun fact about hedgehogs" }],
posthogDistinctId: "user_123", // optional
posthogTraceId: "trace_123", // optional
posthogProperties: { conversation_id: "abc123", paid: true }, //optional
posthogGroups: { company: "company_id_in_your_db" }, // optional
posthogPrivacyMode: false // optional
});
model: 'gpt-3.5-turbo',
messages: [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }],
posthogDistinctId: 'user_123', // optional
posthogTraceId: 'trace_123', // optional
posthogProperties: { conversation_id: 'abc123', paid: true }, //optional
posthogGroups: { company: 'company_id_in_your_db' }, // optional
posthogPrivacyMode: false, // optional
})

console.log(completion.choices[0].message.content)

2 changes: 1 addition & 1 deletion posthog-ai/package.json
@@ -1,6 +1,6 @@
{
"name": "@posthog/ai",
"version": "2.0.1",
"version": "2.1.0",
"description": "PostHog Node.js AI integrations",
"repository": {
"type": "git",
22 changes: 11 additions & 11 deletions posthog-ai/src/openai/index.ts
@@ -89,9 +89,9 @@ export class WrappedCompletions extends OpenAIOrignal.Chat.Completions {
return parentPromise.then((value) => {
const passThroughStream = new PassThrough({ objectMode: true })
let accumulatedContent = ''
let usage: { input_tokens: number; output_tokens: number } = {
input_tokens: 0,
output_tokens: 0,
let usage: { inputTokens: number; outputTokens: number } = {
inputTokens: 0,
outputTokens: 0,
}
if ('tee' in value) {
const openAIStream = value
@@ -102,8 +102,8 @@ export class WrappedCompletions extends OpenAIOrignal.Chat.Completions {
accumulatedContent += delta
if (chunk.usage) {
usage = {
input_tokens: chunk.usage.prompt_tokens ?? 0,
output_tokens: chunk.usage.completion_tokens ?? 0,
inputTokens: chunk.usage.prompt_tokens ?? 0,
outputTokens: chunk.usage.completion_tokens ?? 0,
}
}
passThroughStream.write(chunk)
@@ -139,8 +139,8 @@ export class WrappedCompletions extends OpenAIOrignal.Chat.Completions {
params: body,
httpStatus: 500,
usage: {
input_tokens: 0,
output_tokens: 0,
inputTokens: 0,
outputTokens: 0,
},
})
passThroughStream.emit('error', error)
@@ -167,8 +167,8 @@ export class WrappedCompletions extends OpenAIOrignal.Chat.Completions {
params: body,
httpStatus: 200,
usage: {
input_tokens: result.usage?.prompt_tokens ?? 0,
output_tokens: result.usage?.completion_tokens ?? 0,
inputTokens: result.usage?.prompt_tokens ?? 0,
outputTokens: result.usage?.completion_tokens ?? 0,
},
})
}
@@ -188,8 +188,8 @@ export class WrappedCompletions extends OpenAIOrignal.Chat.Completions {
params: body,
httpStatus: 500,
usage: {
input_tokens: 0,
output_tokens: 0,
inputTokens: 0,
outputTokens: 0,
},
})
throw error
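
Note on the change above: the OpenAI wrapper now normalizes provider token counts into camelCase inputTokens / outputTokens before events are sent. A minimal sketch of that mapping for OpenAI-shaped usage objects (the helper name is illustrative, not an actual export in this diff):

// Hypothetical helper mirroring the inline normalization above.
const toUsage = (usage?: { prompt_tokens?: number; completion_tokens?: number }) => ({
  inputTokens: usage?.prompt_tokens ?? 0,
  outputTokens: usage?.completion_tokens ?? 0,
})

// toUsage({ prompt_tokens: 12, completion_tokens: 34 }) -> { inputTokens: 12, outputTokens: 34 }
// toUsage(undefined)                                     -> { inputTokens: 0, outputTokens: 0 }
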
38 changes: 15 additions & 23 deletions posthog-ai/src/utils.ts
@@ -34,26 +34,6 @@ export const getModelParams = (params: ChatCompletionCreateParamsBase & Monitori
return modelParams
}

export const getUsage = (response: any, provider: string): { input_tokens: number; output_tokens: number } => {
if (!response?.usage) {
return { input_tokens: 0, output_tokens: 0 }
}

if (provider === 'anthropic') {
return {
input_tokens: response.usage.input_tokens ?? 0,
output_tokens: response.usage.output_tokens ?? 0,
}
} else if (provider === 'openai') {
return {
input_tokens: response.usage.prompt_tokens ?? 0,
output_tokens: response.usage.completion_tokens ?? 0,
}
}

return { input_tokens: 0, output_tokens: 0 }
}

/**
* Helper to format responses (non-streaming) for consumption, mirroring Python's openai vs. anthropic approach.
*/
@@ -123,7 +103,7 @@ export type SendEventToPosthogParams = {
latency: number
baseURL: string
httpStatus: number
usage?: { input_tokens?: number; output_tokens?: number }
usage?: { inputTokens?: number; outputTokens?: number }
params: ChatCompletionCreateParamsBase & MonitoringParams
}

@@ -141,6 +121,18 @@ export const sendEventToPosthog = ({
httpStatus = 200,
usage = {},
}: SendEventToPosthogParams): void => {
console.log('sendEventToPosthog', {
client,
distinctId,
traceId,
model,
provider,
input,
output,
latency,
baseURL,
params,
})
if (client.capture) {
client.capture({
distinctId: distinctId ?? traceId,
@@ -152,8 +144,8 @@
$ai_input: withPrivacyMode(client, params.posthogPrivacyMode ?? false, input),
$ai_output_choices: withPrivacyMode(client, params.posthogPrivacyMode ?? false, output),
$ai_http_status: httpStatus,
$ai_input_tokens: usage.input_tokens ?? 0,
$ai_output_tokens: usage.output_tokens ?? 0,
$ai_input_tokens: usage.inputTokens ?? 0,
$ai_output_tokens: usage.outputTokens ?? 0,
$ai_latency: latency,
$ai_trace_id: traceId,
$ai_base_url: baseURL,
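
With getUsage removed, each wrapper now passes usage directly in the camelCase shape declared on SendEventToPosthogParams. A hedged sketch of a call site, assuming sendEventToPosthog remains an internal helper (the import path and literal values here are illustrative only):

import { PostHog } from 'posthog-node'
import { sendEventToPosthog } from './utils' // internal module; path is illustrative

const phClient = new PostHog('<YOUR_PROJECT_API_KEY>', { host: 'https://us.i.posthog.com' })
const messages = [{ role: 'user', content: 'Tell me a fun fact about hedgehogs' }]

sendEventToPosthog({
  client: phClient,
  distinctId: 'user_123',
  traceId: 'trace_123',
  model: 'gpt-3.5-turbo',
  provider: 'openai',
  input: messages,
  output: [{ content: 'Hedgehogs can swim.', role: 'assistant' }],
  latency: 0.42, // seconds, matching the (Date.now() - startTime) / 1000 computation above
  baseURL: 'https://api.openai.com/v1',
  params: { model: 'gpt-3.5-turbo', messages } as any, // ChatCompletionCreateParamsBase & MonitoringParams
  httpStatus: 200,
  usage: { inputTokens: 12, outputTokens: 34 }, // camelCase keys per the updated type
})
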
88 changes: 67 additions & 21 deletions posthog-ai/src/vercel/middleware.ts
@@ -2,18 +2,58 @@ import { experimental_wrapLanguageModel as wrapLanguageModel } from 'ai'
import type {
LanguageModelV1,
Experimental_LanguageModelV1Middleware as LanguageModelV1Middleware,
LanguageModelV1Prompt,
LanguageModelV1StreamPart,
} from 'ai'
import { v4 as uuidv4 } from 'uuid'
import type { PostHog } from 'posthog-node'
import { PostHog } from 'posthog-node'
import { sendEventToPosthog } from '../utils'

interface CreateInstrumentationMiddlewareOptions {
posthogDistinctId?: string
posthogTraceId: string
posthogProperties?: Record<string, any>
posthogPrivacyMode?: boolean
posthogGroups?: string[]
posthogGroups?: Record<string, any>
}

interface PostHogInput {
content: string
role: string
}

const mapVercelParams = (params: any): Record<string, any> => {
return {
temperature: params.temperature,
max_tokens: params.maxTokens,
top_p: params.topP,
frequency_penalty: params.frequencyPenalty,
presence_penalty: params.presencePenalty,
stop: params.stopSequences,
stream: params.stream,
}
}

const mapVercelPrompt = (prompt: LanguageModelV1Prompt): PostHogInput[] => {
return prompt.map((p) => {
let content = ''
if (Array.isArray(p.content)) {
content = p.content
.map((c) => {
if (c.type === 'text') {
return c.text
}
return ''
})
.join('')
} else {
content = p.content
}
return {
role: p.role,
content,
}
})
}

export const createInstrumentationMiddleware = (
@@ -24,7 +64,10 @@ export const createInstrumentationMiddleware = (
const middleware: LanguageModelV1Middleware = {
wrapGenerate: async ({ doGenerate, params }) => {
const startTime = Date.now()

let mergedParams = {
...options,
...mapVercelParams(params),
}
try {
const result = await doGenerate()
const latency = (Date.now() - startTime) / 1000
@@ -35,15 +78,15 @@ export const createInstrumentationMiddleware = (
traceId: options.posthogTraceId,
model: model.modelId,
provider: 'vercel',
input: options.posthogPrivacyMode ? '' : params.prompt,
input: options.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
output: [{ content: result.text, role: 'assistant' }],
latency,
baseURL: '',
params: { posthog_properties: options } as any,
params: mergedParams as any,
httpStatus: 200,
usage: {
input_tokens: result.usage.promptTokens,
output_tokens: result.usage.completionTokens,
inputTokens: result.usage.promptTokens,
outputTokens: result.usage.completionTokens,
},
})

@@ -55,15 +98,15 @@ export const createInstrumentationMiddleware = (
traceId: options.posthogTraceId,
model: model.modelId,
provider: 'vercel',
input: options.posthogPrivacyMode ? '' : params.prompt,
input: options.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
output: [],
latency: 0,
baseURL: '',
params: { posthog_properties: options } as any,
params: mergedParams as any,
httpStatus: 500,
usage: {
input_tokens: 0,
output_tokens: 0,
inputTokens: 0,
outputTokens: 0,
},
})
throw error
@@ -73,8 +116,11 @@ export const createInstrumentationMiddleware = (
wrapStream: async ({ doStream, params }) => {
const startTime = Date.now()
let generatedText = ''
let usage: { input_tokens?: number; output_tokens?: number } = {}

let usage: { inputTokens?: number; outputTokens?: number } = {}
let mergedParams = {
...options,
...mapVercelParams(params),
}
try {
const { stream, ...rest } = await doStream()

@@ -85,8 +131,8 @@ export const createInstrumentationMiddleware = (
}
if (chunk.type === 'finish') {
usage = {
input_tokens: chunk.usage?.promptTokens,
output_tokens: chunk.usage?.completionTokens,
inputTokens: chunk.usage?.promptTokens,
outputTokens: chunk.usage?.completionTokens,
}
}
controller.enqueue(chunk)
@@ -100,11 +146,11 @@ export const createInstrumentationMiddleware = (
traceId: options.posthogTraceId,
model: model.modelId,
provider: 'vercel',
input: options.posthogPrivacyMode ? '' : params.prompt,
input: options.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
output: [{ content: generatedText, role: 'assistant' }],
latency,
baseURL: '',
params: { posthog_properties: options } as any,
params: mergedParams as any,
httpStatus: 200,
usage,
})
@@ -122,15 +168,15 @@ export const createInstrumentationMiddleware = (
traceId: options.posthogTraceId,
model: model.modelId,
provider: 'vercel',
input: options.posthogPrivacyMode ? '' : params.prompt,
input: options.posthogPrivacyMode ? '' : mapVercelPrompt(params.prompt),
output: [],
latency: 0,
baseURL: '',
params: { posthog_properties: options } as any,
params: mergedParams as any,
httpStatus: 500,
usage: {
input_tokens: 0,
output_tokens: 0,
inputTokens: 0,
outputTokens: 0,
},
})
throw error
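
Taken together, the new middleware maps Vercel AI SDK calls onto the same PostHog event format as the OpenAI wrapper: mapVercelPrompt flattens the prompt into role/content pairs, mapVercelParams translates generation settings, and token counts are reported as inputTokens / outputTokens. A hedged usage sketch follows; the exact parameter order of createInstrumentationMiddleware is not visible in this diff, so treat the (client, model, options) order and the import path as assumptions:

import { openai } from '@ai-sdk/openai' // any LanguageModelV1 provider works here
import { experimental_wrapLanguageModel as wrapLanguageModel, generateText } from 'ai'
import { PostHog } from 'posthog-node'
import { v4 as uuidv4 } from 'uuid'
import { createInstrumentationMiddleware } from '@posthog/ai' // assumed export path

const phClient = new PostHog('<YOUR_PROJECT_API_KEY>', { host: 'https://us.i.posthog.com' })
const baseModel = openai('gpt-4o-mini')

// Assumed signature: (client, model, options) — check the source above for the exact order.
const middleware = createInstrumentationMiddleware(phClient, baseModel, {
  posthogDistinctId: 'user_123', // optional
  posthogTraceId: uuidv4(), // required by CreateInstrumentationMiddlewareOptions
  posthogProperties: { conversation_id: 'abc123' }, // optional
  posthogPrivacyMode: false, // optional
  posthogGroups: { company: 'company_id_in_your_db' }, // now a Record, not a string[]
})

const model = wrapLanguageModel({ model: baseModel, middleware })

const { text } = await generateText({
  model,
  prompt: 'Tell me a fun fact about hedgehogs',
})
console.log(text)

Events then arrive in PostHog with the same $ai_* properties captured by sendEventToPosthog for the OpenAI wrapper.
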
