Initial Pulze Provider #153

Status: Open. Wants to merge 19 commits into main. Showing changes from 15 commits.
29 changes: 19 additions & 10 deletions pkg/plugin/health.go
@@ -11,7 +11,11 @@ import (
"github.com/grafana/grafana-plugin-sdk-go/build"
)

var openAIModels = []string{"gpt-3.5-turbo", "gpt-4"}
// Define models for each provider to be included in the health check.
var providerModels = map[string][]string{
Contributor: I think this would be more type-safe if it was a map[openAIProvider][]string.
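For illustration, a minimal sketch of that suggestion (hypothetical; it assumes openAIProvider is the string-based type declared in settings.go and is not part of this diff):

// Sketch only: keying the model list by openAIProvider instead of raw strings
// lets the compiler catch typos in provider names.
var providerModels = map[openAIProvider][]string{
	openAIProviderOpenAI: {"gpt-3.5-turbo", "gpt-4"},
	openAIProviderPulze:  {"pulze", "openai/gpt-4"},
}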

"openai": {"gpt-3.5-turbo", "gpt-4"},
"pulze": {"pulze", "openai/gpt-4"},
}

type healthCheckClient interface {
Do(req *http.Request) (*http.Response, error)
@@ -79,20 +83,24 @@ func (a *App) testOpenAIModel(ctx context.Context, model string) error {
return nil
}

// openAIHealth checks the health of the OpenAI configuration and caches the
// openAIHealth performs a health check for the selected provider and caches the
// result if successful. The caller must lock a.healthCheckMutex.
func (a *App) openAIHealth(ctx context.Context, req *backend.CheckHealthRequest) (openAIHealthDetails, error) {
func (a *App) openAIHealth(ctx context.Context, req *backend.CheckHealthRequest) openAIHealthDetails {
if a.healthOpenAI != nil {
return *a.healthOpenAI, nil
return *a.healthOpenAI
}

d := openAIHealthDetails{
OK: true,
Configured: a.settings.OpenAI.apiKey != "" || a.settings.OpenAI.Provider == openAIProviderGrafana,
Models: map[string]openAIModelHealth{},
Contributor: Why are we removing the condition for openAIProviderGrafana here?

Author: I've changed the logic here (all "providers" are listed in providerModels now), hence the lookup can use a.settings.OpenAI.Provider.
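A sketch of the lookup that reply describes, assuming the typed map suggested above were adopted (hypothetical; the diff itself keeps string keys and an explicit if):

// Sketch only: look the models up by the configured provider and fall back
// to the OpenAI list when the provider has no entry.
models, ok := providerModels[a.settings.OpenAI.Provider]
if !ok {
	models = providerModels[openAIProviderOpenAI]
}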
}
models := providerModels["openai"]
if a.settings.OpenAI.Provider == openAIProviderPulze {
models = providerModels["pulze"]
}

for _, model := range openAIModels {
for _, model := range models {
health := openAIModelHealth{OK: false, Error: "OpenAI not configured"}
if d.Configured {
health.OK = true
Expand Down Expand Up @@ -121,7 +129,7 @@ func (a *App) openAIHealth(ctx context.Context, req *backend.CheckHealthRequest)
if d.OK {
a.healthOpenAI = &d
}
return d, nil
return d
}

// testVectorService checks the health of VectorAPI and caches the result if
@@ -137,6 +145,8 @@ func (a *App) testVectorService(ctx context.Context) error {
return nil
}

// vectorHealth performs a health check for the Vector service and caches the
// result if successful. The caller must lock a.healthCheckMutex.
func (a *App) vectorHealth(ctx context.Context) vectorHealthDetails {
if a.healthVector != nil {
return *a.healthVector
Expand Down Expand Up @@ -169,10 +179,9 @@ func (a *App) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest)
a.healthCheckMutex.Lock()
defer a.healthCheckMutex.Unlock()

Contributor: Nit: I'm thinking we can remove the error returned from this function as well; we aren't returning an error anywhere.

Author: I thought so too, but at the moment it's required to ensure App complies with the CheckHealthHandler interface (https://github.com/grafana/grafana-plugin-sdk-go/blob/main/backend/diagnostics.go#L12).
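For context, the interface referenced in that reply looks roughly like this (paraphrased from grafana-plugin-sdk-go's backend package; see the linked file for the authoritative definition):

// CheckHealthHandler is what App must satisfy, which is why CheckHealth keeps
// returning (*backend.CheckHealthResult, error) even though the error is always nil.
type CheckHealthHandler interface {
	CheckHealth(ctx context.Context, req *CheckHealthRequest) (*CheckHealthResult, error)
}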

openAI, err := a.openAIHealth(ctx, req)
if err != nil {
openAI.OK = false
openAI.Error = err.Error()
openAI := a.openAIHealth(ctx, req)
if openAI.Error == "" {
a.healthOpenAI = &openAI
}

vector := a.vectorHealth(ctx)
4 changes: 4 additions & 0 deletions pkg/plugin/openai.go
@@ -22,6 +22,8 @@ func (a *App) newAuthenticatedOpenAIRequest(ctx context.Context, method string,
req.Header.Set("OpenAI-Organization", a.settings.OpenAI.OrganizationID)
case openAIProviderAzure:
req.Header.Set("api-key", a.settings.OpenAI.apiKey)
case openAIProviderPulze:
req.Header.Set("Authorization", "Bearer "+a.settings.OpenAI.apiKey)
case openAIProviderGrafana:
req.SetBasicAuth(a.settings.Tenant, a.settings.GrafanaComAPIKey)
req.Header.Add("X-Scope-OrgID", a.settings.Tenant)
@@ -34,6 +36,8 @@ func (a *App) newOpenAIChatCompletionsRequest(ctx context.Context, body map[stri
var err error

switch a.settings.OpenAI.Provider {
case openAIProviderPulze:
fallthrough
case openAIProviderOpenAI:
url, err = url.Parse(a.settings.OpenAI.URL)
if err != nil {
79 changes: 79 additions & 0 deletions pkg/plugin/resources.go
@@ -94,6 +94,83 @@ func newOpenAIProxy(settings Settings) http.Handler {
}
}

// pulzeOpenAIProxy is a reverse proxy for Pulze API calls.
// It modifies the request to point to the configured Pulze API URL, returning
// a 400 error if the URL in settings cannot be parsed, then proxies the request
// using the configured API key, injecting the default Pulze model if none is given.
type pulzeOpenAIProxy struct {
settings Settings
// rp is a reverse proxy handling the modified request. Use this rather than
// our own client, since it handles things like buffering.
rp *httputil.ReverseProxy
}

func newPulzeOpenAIProxy(settings Settings) http.Handler {
// We make all of the actual modifications in ServeHTTP, since they can fail
// and we want to early-return from HTTP requests in that case.
director := func(req *http.Request) {}
return &pulzeOpenAIProxy{
settings: settings,
rp: &httputil.ReverseProxy{Director: director},
}
}

func (p *pulzeOpenAIProxy) modifyRequest(req *http.Request) error {
err := modifyURL(p.settings.OpenAI.URL, req)
if err != nil {
return fmt.Errorf("modify url: %w", err)
}

req.URL.Path = strings.TrimPrefix(req.URL.Path, "/pulze")
req.Header.Add("Authorization", "Bearer "+p.settings.OpenAI.apiKey)

// Read the body so we can determine if we need to add the configured pulze model
bodyBytes, err := io.ReadAll(req.Body)
if err != nil {
return fmt.Errorf("read request body: %w", err)
}
var requestBody map[string]interface{}
err = json.Unmarshal(bodyBytes, &requestBody)
if err != nil {
return fmt.Errorf("unmarshal request body: %w", err)
}

// check if model is empty or not present
if val, ok := requestBody["model"].(string); !ok || val == "" {
requestBody["model"] = p.settings.OpenAI.PulzeModel
}

newBodyBytes, err := json.Marshal(requestBody)
if err != nil {
return fmt.Errorf("unmarshal request body: %w", err)
}

req.Body = io.NopCloser(bytes.NewBuffer(newBodyBytes))
req.ContentLength = int64(len(newBodyBytes))
return nil
}

func (p *pulzeOpenAIProxy) ServeHTTP(w http.ResponseWriter, req *http.Request) {
err := p.modifyRequest(req)
if err != nil {
// Attempt to write the error as JSON.
jd, err := json.Marshal(map[string]string{"error": err.Error()})
if err != nil {
// We can't write JSON, so just write the error string.
w.WriteHeader(http.StatusInternalServerError)
_, err = w.Write([]byte(err.Error()))
if err != nil {
log.DefaultLogger.Error("Unable to write error response", "err", err)
}
return
}
w.WriteHeader(http.StatusBadRequest)
_, err = w.Write(jd)
if err != nil {
log.DefaultLogger.Error("Unable to write error response", "err", err)
}
return
}
p.rp.ServeHTTP(w, req)
}

// azureOpenAIProxy is a reverse proxy for Azure OpenAI API calls.
// It modifies the request to point to the configured Azure OpenAI API, returning
// a 400 error if the URL in settings cannot be parsed or if the request refers
@@ -417,6 +494,8 @@ func (a *App) registerRoutes(mux *http.ServeMux, settings Settings) {
mux.Handle("/openai/", newAzureOpenAIProxy(settings))
case openAIProviderGrafana:
mux.Handle("/openai/", newGrafanaOpenAIProxy(settings))
case openAIProviderPulze:
mux.Handle("/pulze/", newPulzeOpenAIProxy(settings))
default:
log.DefaultLogger.Warn("Unknown OpenAI provider configured", "provider", settings.OpenAI.Provider)
}
46 changes: 46 additions & 0 deletions pkg/plugin/resources_test.go
@@ -274,6 +274,52 @@ func TestCallOpenAIProxy(t *testing.T) {
expReqPath: "/openai/v1/chat/completions",
expReqBody: []byte(`{"model": "gpt-3.5-turbo", "messages": ["some stuff"]}`),

expStatus: http.StatusOK,
},
{
name: "pulze with specific model",

settings: Settings{
OpenAI: OpenAISettings{
Provider: openAIProviderPulze,
PulzeModel: "pulze",
},
},
apiKey: "abcd1234",

method: http.MethodPost,
path: "/pulze/v1/chat/completions",
body: []byte(`{"model": "openai/gpt-3.5-turbo", "messages": ["some stuff"]}`),

expReqHeaders: http.Header{
"Authorization": {"Bearer abcd1234"},
},
expReqPath: "/v1/chat/completions",
expReqBody: []byte(`{"model": "openai/gpt-3.5-turbo", "messages": ["some stuff"]}`),

expStatus: http.StatusOK,
},
{
name: "pulze without model",

settings: Settings{
OpenAI: OpenAISettings{
Provider: openAIProviderPulze,
PulzeModel: "pulze",
},
},
apiKey: "abcd1234",

method: http.MethodPost,
path: "/pulze/v1/chat/completions",
body: []byte(`{"model": "", "messages": ["some stuff"]}`),

expReqHeaders: http.Header{
"Authorization": {"Bearer abcd1234"},
},
expReqPath: "/v1/chat/completions",
expReqBody: []byte(`{"model": "pulze", "messages": ["some stuff"]}`),

expStatus: http.StatusOK,
},
} {
15 changes: 12 additions & 3 deletions pkg/plugin/settings.go
@@ -13,15 +13,17 @@ import (
"github.com/grafana/grafana-plugin-sdk-go/backend/log"
)

const openAIKey = "openAIKey"
const encodedTenantAndTokenKey = "base64EncodedAccessToken"

type openAIProvider string

const (
openAIProviderOpenAI openAIProvider = "openai"
openAIProviderAzure openAIProvider = "azure"
openAIProviderGrafana openAIProvider = "grafana" // via llm-gateway
openAIProviderPulze openAIProvider = "pulze"

openAIKey = "openAIKey"
llmGatewayKey = "llmGatewayKey"
encodedTenantAndTokenKey = "base64EncodedAccessToken"
)

// OpenAISettings contains the user-specified OpenAI connection details
Contributor: I think at some point we'll have to consider renaming this to ProviderSettings or something, but that can be done in a future PR.

Author: Yep, agree. We initially planned to include this, but it's better to have a separate PR for it 👍

@@ -38,6 +40,9 @@ type OpenAISettings struct {
// Model mappings required for Azure's OpenAI
AzureMapping [][]string `json:"azureModelMapping"`

// The default Pulze model to use when the request does not specify one
PulzeModel string `json:"pulzeModel"`

// apiKey is the user-specified api key needed to authenticate requests to the OpenAI
// provider (excluding the LLMGateway). Stored securely.
apiKey string
@@ -121,6 +126,10 @@ func loadSettings(appSettings backend.AppInstanceSettings) (*Settings, error) {
log.DefaultLogger.Warn("Cannot use LLM Gateway as no URL specified, disabling it")
settings.OpenAI.Provider = ""
}
case openAIProviderPulze:
if settings.OpenAI.URL == "" {
settings.OpenAI.URL = "https://api.pulze.ai/v1"
}
default:
// Default to disabled LLM support if an unknown provider was specified.
log.DefaultLogger.Warn("Unknown OpenAI provider", "provider", settings.OpenAI.Provider)
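For illustration, a minimal sketch (hypothetical values) of the settings this case yields when an admin provisions the Pulze provider and a default model but leaves the URL empty:

// Sketch only, not part of the diff: loadSettings fills in the Pulze default URL.
settings := Settings{
	OpenAI: OpenAISettings{
		Provider:   openAIProviderPulze,
		URL:        "https://api.pulze.ai/v1", // default applied by the case above
		PulzeModel: "pulze",
	},
}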
3 changes: 2 additions & 1 deletion src/components/AppConfig/AppConfig.test.tsx
@@ -1,7 +1,7 @@
import React from 'react';
import { PluginType } from '@grafana/data';
import { render, screen } from '@testing-library/react';
import { testIds } from 'components/testIds';
import React from 'react';
import { AppConfig, AppConfigProps } from './AppConfig';

describe('Components/AppConfig', () => {
@@ -43,6 +43,7 @@ describe('Components/AppConfig', () => {
// expect(screen.queryByTestId(testIds.appConfig.openAIUrl)).toBeInTheDocument();
// expect(screen.queryByTestId(testIds.appConfig.model)).toBeInTheDocument();
expect(screen.queryByRole('group', { name: /vector settings/i })).toBeInTheDocument();
expect(screen.queryByTestId(testIds.appConfig.model)).toBeInTheDocument();
expect(screen.queryByTestId(testIds.appConfig.qdrantSecure)).toBeInTheDocument();
expect(screen.queryByTestId(testIds.appConfig.qdrantAddress)).toBeInTheDocument();
// Don't expect to see the Grafana vector API field when type is qdrant
28 changes: 26 additions & 2 deletions src/components/AppConfig/OpenAI.tsx
@@ -7,7 +7,8 @@ import { testIds } from 'components/testIds';
import { getStyles, Secrets, SecretsSet } from './AppConfig';
import { AzureModelDeploymentConfig, AzureModelDeployments } from './AzureConfig';

export type OpenAIProvider = 'openai' | 'azure' | 'grafana';
export type OpenAIProvider = 'openai' | 'azure' | 'grafana' | 'pulze';
export type PulzeModel = 'pulze' | 'pulze-v0';

export interface OpenAISettings {
// The URL to reach OpenAI.
@@ -18,6 +19,8 @@ export interface OpenAISettings {
provider?: OpenAIProvider;
// A mapping of OpenAI models to Azure deployment names.
azureModelMapping?: AzureModelDeployments;
// Default pulze model to use if no model is specified.
pulzeModel?: PulzeModel;
}

export function OpenAIConfig({
@@ -51,6 +54,7 @@
[
{ label: 'OpenAI', value: 'openai' },
{ label: 'Azure OpenAI', value: 'azure' },
{ label: 'Pulze', value: 'pulze' },
] as Array<SelectableValue<OpenAIProvider>>
}
value={settings.provider ?? 'openai'}
@@ -90,7 +94,7 @@
/>
</Field>

{settings.provider !== 'azure' && (
{settings.provider !== 'azure' && settings.provider !== 'pulze' && (
<Field label="OpenAI API Organization ID" description="Your OpenAI API Organization ID">
<Input
width={60}
@@ -120,6 +124,26 @@
/>
</Field>
)}

{settings.provider === 'pulze' && (
<Field
label="Default Pulze Model"
description="The default pulze model to use"
data-testid={testIds.appConfig.pulzeModel}
>
<Select
options={
[
{ label: 'pulze', value: 'pulze' },
{ label: 'pulze-v0', value: 'pulze-v0' },
] as Array<SelectableValue<PulzeModel>>
}
value={settings.pulzeModel ?? 'pulze'}
onChange={(e) => onChange({ ...settings, pulzeModel: e.value })}
width={60}
/>
</Field>
)}
</FieldSet>
);
}
1 change: 1 addition & 0 deletions src/components/testIds.ts
@@ -5,6 +5,7 @@
openAIKey: 'data-testid ac-openai-api-key',
openAIOrganizationID: 'data-testid ac-openai-api-organization-id',
openAIUrl: 'data-testid ac-openai-api-url',
pulzeModel: 'data-testid ac-pulze-model',
vectorEnabled: 'data-testid ac-vector-enabled',
qdrantAddress: 'data-testid ac-qdrant-address',
qdrantSecure: 'data-testid ac-qdrant-secure',