Client provides access to the PredictionGuard API.
+constructor constructs a Client API for use.
+Chat generates chat completions based on a conversation history.
input represents the entire set of possible input for the Chat call.
import * as pg from 'predictionguard';
const client = new pg.Client('https://api.predictionguard.com', process.env.PGKEY);
async function Chat() {
const input = {
model: pg.Models.NeuralChat7B,
messages: [
{
role: pg.Roles.User,
content: 'How do you feel about the world in general',
},
],
maxTokens: 1000,
temperature: 0.1,
topP: 0.1,
topK: 50.0,
options: {
factuality: true,
toxicity: true,
pii: pg.PIIs.Replace,
piiReplaceMethod: pg.ReplaceMethods.Random,
},
};
var [result, err] = await client.Chat(input);
if (err != null) {
console.log('ERROR:' + err.error);
return;
}
console.log('RESULT:' + result.createdDate() + ': ' + result.model + ': ' + result.choices[0].message.content);
}
Chat();
+Example
import * as pg from 'predictionguard';
const client = new pg.Client('https://api.predictionguard.com', process.env.PGKEY);
async function Chat() {
const input = {
model: 'Neural-Chat-7B',
messages: [
{
role: pg.Roles.User,
content: 'How do you feel about the world in general',
},
],
maxTokens: 1000,
temperature: 0.1,
topP: 0.1,
topK: 50,
options: {
factuality: true,
toxicity: true,
pii: pg.PIIs.Replace,
piiReplaceMethod: pg.ReplaceMethods.Random,
},
};
var [result, err] = await client.Chat(input);
if (err != null) {
console.log('ERROR:' + err.error);
return;
}
console.log('RESULT:' + result.createdDate() + ': ' + result.model + ': ' + result.choices[0].message.content);
}
Chat();
-ChatSSE
- ChatSSE(input): Promise<null | Error>
ChatSSE generates a stream of chat completions based on a
+
+
ChatSSE
- ChatSSE(input): Promise<null | Error>
ChatSSE generates a stream of chat completions based on a
conversation history.
Parameters
- input: ChatSSEInput
input represents the entire set of
possible input for the SSE Chat call.
@@ -40,36 +35,41 @@
- A Promise with an Error object if the error is not
null.
-Example
import * as pg from 'predictiongaurd';
const client = new pg.Client('https://api.predictionguard.com', process.env.PGKEY);
async function ChatSSE() {
const input = {
model: pg.Models.NeuralChat7B,
messages: [
{
role: pg.Roles.User,
content: 'How do you feel about the world in general',
},
],
maxTokens: 1000,
temperature: 0.1,
topP: 0.1,
topK: 50.0,
onMessage: function (event, err) {
if (err != null) {
if (err.error == 'EOF') {
return;
}
console.log(err);
}
for (const choice of event.choices) {
if (choice.delta.hasOwnProperty('content')) {
process.stdout.write(choice.delta.content);
}
}
},
};
var err = await client.ChatSSE(input);
if (err != null) {
console.log('ERROR:' + err.error);
return;
}
}
ChatSSE();
+Example
import * as pg from 'predictionguard';
const client = new pg.Client('https://api.predictionguard.com', process.env.PGKEY);
async function ChatSSE() {
const input = {
model: 'Neural-Chat-7B',
messages: [
{
role: pg.Roles.User,
content: 'How do you feel about the world in general',
},
],
maxTokens: 1000,
temperature: 0.1,
topP: 0.1,
topK: 50,
onMessage: function (event, err) {
if (err != null) {
if (err.error == 'EOF') {
return;
}
console.log(err);
}
for (const choice of event.choices) {
if (choice.delta.hasOwnProperty('content')) {
process.stdout.write(choice.delta.content);
}
}
},
};
var err = await client.ChatSSE(input);
if (err != null) {
console.log('ERROR:' + err.error);
return;
}
}
ChatSSE();
-Chat Vision
- Chat
Vision(input): Promise<[ChatVision, null | Error]> ChatVision generates answers a question about an image.
+
+
Chat Vision
- Chat
Vision(input): Promise<[ChatVision, null | Error]> ChatVision answers a question about an image.
Parameters
- input: ChatVisionInput
input represents the entire set of
possible input for the Vision Chat call.
Returns Promise<[ChatVision, null | Error]>
- A Promise with a ChatVision object and a Error
object if the error is not null.
-Example
import * as pg from 'predictionguard';
const client = new pg.Client('https://api.predictionguard.com', process.env.PGKEY);
async function ChatVision() {
const image = new pg.ImageNetwork('https://pbs.twimg.com/profile_images/1571574401107169282/ylAgz_f5_400x400.jpg');
const input = {
role: pg.Roles.User,
question: 'is there a deer in this picture',
image: image,
maxTokens: 1000,
temperature: 0.1,
topP: 0.1,
topK: 50.0,
};
var [result, err] = await client.ChatVision(input);
if (err != null) {
console.log('ERROR:' + err.error);
return;
}
console.log('RESULT:' + result.createdDate() + ': ' + result.model + ': ' + result.choices[0].message.content);
}
ChatVision();
+Example
import * as pg from 'predictionguard';
const client = new pg.Client('https://api.predictionguard.com', process.env.PGKEY);
async function ChatVision() {
const image = new pg.ImageNetwork('https://pbs.twimg.com/profile_images/1571574401107169282/ylAgz_f5_400x400.jpg');
const input = {
role: pg.Roles.User,
question: 'is there a deer in this picture',
image: image,
maxTokens: 1000,
temperature: 0.1,
topP: 0.1,
topK: 50,
};
var [result, err] = await client.ChatVision(input);
if (err != null) {
console.log('ERROR:' + err.error);
return;
}
console.log('RESULT:' + result.createdDate() + ': ' + result.model + ': ' + result.choices[0].message.content);
}
ChatVision();
-Completion
- Completion(input): Promise<[Completion, null | Error]>
Completion generates text completions based on the provided input.
+
+
Completion
- Completion(input): Promise<[Completion, null | Error]>
Completion generates text completions based on the provided input.
Parameters
- input: CompletionInput
input represents the entire set of
possible input for the Completion call.
Returns Promise<[Completion, null | Error]>
- A Promise with a Completion object and a Error
object if the error is not null.
-Example
import * as pg from 'predictionguard';
const client = new pg.Client('https://api.predictionguard.com', process.env.PGKEY);
async function Completions() {
const input = {
model: pg.Models.NeuralChat7B,
prompt: 'Will I lose my hair',
maxTokens: 1000,
temperature: 0.1,
topP: 0.1,
topK: 50.0,
};
var [result, err] = await client.Completion(input);
if (err != null) {
console.log('ERROR:' + err.error);
return;
}
console.log('RESULT:' + result.choices[0].text);
}
Completions();
+Example
import * as pg from 'predictionguard';
const client = new pg.Client('https://api.predictionguard.com', process.env.PGKEY);
async function Completions() {
const input = {
model: 'Neural-Chat-7B',
prompt: 'Will I lose my hair',
maxTokens: 1000,
temperature: 0.1,
topP: 0.1,
topK: 50,
};
var [result, err] = await client.Completion(input);
if (err != null) {
console.log('ERROR:' + err.error);
return;
}
console.log('RESULT:' + result.choices[0].text);
}
Completions();
-Embedding
- Embedding(input): Promise<[Embedding, null | Error]>
Embedding generates chat completions based on a conversation history.
-Parameters
- input: EmbeddingInput[]
input represents a collection of
+
+
Embedding
- Embedding(model, input): Promise<[Embedding, null | Error]>
Embedding generates vector embeddings for the provided collection of text and images.
+Parameters
- model: string
model to use.
+ - input: EmbeddingInput[]
input represents a collection of
text and images to vectorize.
Returns Promise<[Embedding, null | Error]>
- A Promise with a Embedding object and an Error object if
the error is not null.
-Example
import * as pg from 'predictiongaurd';
const client = new pg.Client('https://api.predictionguard.com', process.env.PGKEY);
async function Embedding() {
const image = new pg.ImageNetwork('https://pbs.twimg.com/profile_images/1571574401107169282/ylAgz_f5_400x400.jpg');
const input = [
{
text: 'This is Bill Kennedy, a decent Go developer.',
image: image,
},
];
var [result, err] = await client.Embedding(input);
if (err != null) {
console.log('ERROR:' + err.error);
return;
}
for (const dt of result.data) {
process.stdout.write(dt.embedding.toString());
}
}
Embedding();
+Example
import * as pg from 'predictionguard';
const client = new pg.Client('https://api.predictionguard.com', process.env.PGKEY);
async function Embedding() {
const image = new pg.ImageNetwork('https://pbs.twimg.com/profile_images/1571574401107169282/ylAgz_f5_400x400.jpg');
const input = [
{
text: 'This is Bill Kennedy, a decent Go developer.',
image: image,
},
];
var [result, err] = await client.Embedding('bridgetower-large-itm-mlm-itc', input);
if (err != null) {
console.log('ERROR:' + err.error);
return;
}
for (const dt of result.data) {
process.stdout.write(dt.embedding.toString());
}
}
Embedding();
-Factuality
- Factuality(reference, text): Promise<[Factuality, null | Error]>
Factuality checks the factuality of a given text compared to a reference.
+
+
Factuality
- Factuality(reference, text): Promise<[Factuality, null | Error]>
Factuality checks the factuality of a given text compared to a reference.
Parameters
- reference: string
reference represents the reference text
for comparison.
- text: string
text represents the text to be checked
@@ -78,32 +78,35 @@
- A Promise with a Factuality object and a Error
object if the error is not null.
-Example
import * as pg from 'predictionguard';
const client = new pg.Client('https://api.predictionguard.com', process.env.PGKEY);
async function Factuality() {
const fact = `The President shall receive in full for his services during
the term for which he shall have been elected compensation in the aggregate
amount of 400,000 a year, to be paid monthly, and in addition an expense
allowance of 50,000 to assist in defraying expenses relating to or resulting
from the discharge of his official duties. Any unused amount of such expense
allowance shall revert to the Treasury pursuant to section 1552 of title 31,
United States Code. No amount of such expense allowance shall be included in
the gross income of the President. He shall be entitled also to the use of
the furniture and other effects belonging to the United States and kept in
the Executive Residence at the White House.`;
const text = `The president of the united states can take a salary of one
million dollars`;
var [result, err] = await client.Factuality(fact, text);
if (err != null) {
console.log('ERROR:' + err.error);
return;
}
console.log('RESULT:' + JSON.stringify(result.checks[0]));
}
Factuality();
+Example
import * as pg from 'predictionguard';
const client = new pg.Client('https://api.predictionguard.com', process.env.PGKEY);
async function Factuality() {
const fact = `The President shall receive in full for his services during
the term for which he shall have been elected compensation in the aggregate
amount of 400,000 a year, to be paid monthly, and in addition an expense
allowance of 50,000 to assist in defraying expenses relating to or resulting
from the discharge of his official duties. Any unused amount of such expense
allowance shall revert to the Treasury pursuant to section 1552 of title 31,
United States Code. No amount of such expense allowance shall be included in
the gross income of the President. He shall be entitled also to the use of
the furniture and other effects belonging to the United States and kept in
the Executive Residence at the White House.`;
const text = `The president of the united states can take a salary of one
million dollars`;
var [result, err] = await client.Factuality(fact, text);
if (err != null) {
console.log('ERROR:' + err.error);
return;
}
console.log('RESULT:' + JSON.stringify(result.checks[0]));
}
Factuality();
-Health Check
- Health
Check(): Promise<[string, null | Error]> HealthCheck validates the PG API Service is available.
+
+
Health Check
- Health
Check(): Promise<[string, null | Error]> HealthCheck validates the PG API Service is available.
Returns Promise<[string, null | Error]>
- A Promise with a string and an Error object if
the error is not null.
-Example
import * as pg from 'predictionguard';
const client = new pg.Client('https://api.predictionguard.com', process.env.PGKEY);
async function HealthCheck() {
var [result, err] = await client.HealthCheck();
if (err != null) {
console.log('ERROR:' + err.error);
return;
}
console.log(result);
}
HealthCheck();
+Example
import * as pg from 'predictionguard';
const client = new pg.Client('https://api.predictionguard.com', process.env.PGKEY);
async function HealthCheck() {
var [result, err] = await client.HealthCheck();
if (err != null) {
console.log('ERROR:' + err.error);
return;
}
console.log(result);
}
HealthCheck();
-Injection
Injection
- Injection(prompt): Promise<[Injection, null | Error]>
Injection detects potential prompt injection attacks in a given prompt.
Parameters
- prompt: string
prompt represents the text to detect
injection attacks against.
Returns Promise<[Injection, null | Error]>
- A Promise with a Injection object and a Error
object if the error is not null.
-Example
import * as pg from 'predictionguard';
const client = new pg.Client('https://api.predictionguard.com', process.env.PGKEY);
async function Injection() {
const prompt = `A short poem may be a stylistic choice or it may be that you
have said what you intended to say in a more concise way.`;
var [result, err] = await client.Injection(prompt);
if (err != null) {
console.log('ERROR:' + err.error);
return;
}
console.log('RESULT:' + result.checks[0].probability);
}
Injection();
+Example
import * as pg from 'predictionguard';
const client = new pg.Client('https://api.predictionguard.com', process.env.PGKEY);
async function Injection() {
const prompt = `A short poem may be a stylistic choice or it may be that you
have said what you intended to say in a more concise way.`;
var [result, err] = await client.Injection(prompt);
if (err != null) {
console.log('ERROR:' + err.error);
return;
}
console.log('RESULT:' + result.checks[0].probability);
}
Injection();
-Protected
Raw Do Get
- Raw
Do Get(endpoint): Promise<[any, null | Error]> RawDoGet performs a raw GET call.
+
+
Protected
Raw Do Get
- Raw
Do Get(endpoint): Promise<[any, null | Error]> RawDoGet performs a raw GET call.
Parameters
- endpoint: string
endpoint represents endpoint to call and
does not include the transport or domain.
Returns Promise<[any, null | Error]>
- A Promise with a response object and an error object if
the error is not null.
-
Protected
Raw Do Post
- Raw
Do Post(endpoint, body): Promise<[any, null | Error]> RawDoPost performs a raw POST call.
+Protected
Raw Do Post
- Raw
Do Post(endpoint, body): Promise<[any, null | Error]> RawDoPost performs a raw POST call.
Parameters
- endpoint: string
endpoint represents endpoint to call and
does not include the transport or domain.
- body: any
body represents an input object.
@@ -111,16 +114,16 @@
- A Promise with a response object and an error object if
the error is not null.
-Protected
Raw DoSSEPost
- Raw
DoSSEPost(endpoint, body, onMessage): Promise<null | Error> RawDoSSEPost performs a raw POST call with SSE support.
+Protected
Raw DoSSEPost
- Raw
DoSSEPost(endpoint, body, onMessage): Promise<null | Error> RawDoSSEPost performs a raw POST call with SSE support.
Parameters
- endpoint: string
endpoint represents endpoint to call and
does not include the transport or domain.
- body: any
body represents an input object.
- - onMessage: ((event, err) => void)
onMessage represents a function that will receive the stream of chat
+
- onMessage: ((event: null | ServerSentEvent, err: null | Error) => void)
onMessage represents a function that will receive the stream of chat
results.
- (event, err): void
Parameters
- event: null | ServerSentEvent
- err: null | Error
Returns void
Returns Promise<null | Error>
- A Promise with an error object if the error is not null.
-
ReplacePII
- ReplacePII(replaceMethod, prompt): Promise<[ReplacePII, null | Error]>
ReplacePII replaces personal information such as names, SSNs, and
+
ReplacePII
- ReplacePII(replaceMethod, prompt): Promise<[ReplacePII, null | Error]>
ReplacePII replaces personal information such as names, SSNs, and
emails in a given text.
Parameters
- replaceMethod: ReplaceMethods
replaceMethod represents the
method to use for replacing personal information.
@@ -130,18 +133,20 @@
- A Promise with a ReplacePII object and a Error
object if the error is not null.
-Example
import * as pg from 'predictionguard';
const client = new pg.Client('https://api.predictionguard.com', process.env.PGKEY);
async function ReplacePII() {
const replaceMethod = pg.ReplaceMethods.Mask;
const prompt = `My email is bill@ardanlabs.com and my number is 954-123-4567.`;
var [result, err] = await client.ReplacePII(replaceMethod, prompt);
if (err != null) {
console.log('ERROR:' + err.error);
return;
}
console.log('RESULT:' + result.checks[0].new_prompt);
}
ReplacePII();
+Example
import * as pg from 'predictionguard';
const client = new pg.Client('https://api.predictionguard.com', process.env.PGKEY);
async function ReplacePII() {
const replaceMethod = pg.ReplaceMethods.Mask;
const prompt = `My email is bill@ardanlabs.com and my number is 954-123-4567.`;
var [result, err] = await client.ReplacePII(replaceMethod, prompt);
if (err != null) {
console.log('ERROR:' + err.error);
return;
}
console.log('RESULT:' + result.checks[0].new_prompt);
}
ReplacePII();
-Toxicity
Toxicity
- Toxicity(text): Promise<[Toxicity, null | Error]>
Toxicity checks the toxicity of a given text.
Parameters
- text: string
text represents the text to be scored
for toxicity.
Returns Promise<[Toxicity, null | Error]>
- A Promise with a Toxicity object and a Error
object if the error is not null.
-Example
import * as pg from 'predictionguard';
const client = new pg.Client('https://api.predictionguard.com', process.env.PGKEY);
async function Toxicity() {
const text = `Every flight I have is late and I am very angry. I want to
hurt someone.`;
var [result, err] = await client.Toxicity(text);
if (err != null) {
console.log('ERROR:' + err.error);
return;
}
console.log('RESULT:' + result.checks[0].score);
}
Toxicity();
+Example
import * as pg from 'predictionguard';
const client = new pg.Client('https://api.predictionguard.com', process.env.PGKEY);
async function Toxicity() {
const text = `Every flight I have is late and I am very angry. I want to
hurt someone.`;
var [result, err] = await client.Toxicity(text);
if (err != null) {
console.log('ERROR:' + err.error);
return;
}
console.log('RESULT:' + result.checks[0].score);
}
Toxicity();
-Translate
Translate
- Translate(text, sourceLang, targetLang, useThirdPartyEngine): Promise<[Translate, null | Error]>
Translate converts text from one language to another.
Parameters
- text: string
text represents the text to be translated.
- sourceLang: Languages
sourceLang represents the source
language of the text.
@@ -153,6 +158,7 @@
- A Promise with a Translate object and a Error
object if the error is not null.
-Example
import * as pg from 'predictionguard';
const client = new pg.Client('https://api.predictionguard.com', process.env.PGKEY);
async function Translate() {
const text = `The rain in Spain stays mainly in the plain`;
const sourceLang = pg.Languages.English;
const targetLang = pg.Languages.Spanish;
const useThirdPartyEngine = false;
var [result, err] = await client.Translate(text, sourceLang, targetLang, useThirdPartyEngine);
if (err != null) {
console.log('ERROR:' + err.error);
return;
}
console.log('RESULT:' + result.best_translation);
}
Translate();
+Example
import * as pg from 'predictionguard';
const client = new pg.Client('https://api.predictionguard.com', process.env.PGKEY);
async function Translate() {
const text = `The rain in Spain stays mainly in the plain`;
const sourceLang = pg.Languages.English;
const targetLang = pg.Languages.Spanish;
const useThirdPartyEngine = false;
var [result, err] = await client.Translate(text, sourceLang, targetLang, useThirdPartyEngine);
if (err != null) {
console.log('ERROR:' + err.error);
return;
}
console.log('RESULT:' + result.best_translation);
}
Translate();
-
\ No newline at end of file
+
+
diff --git a/docs/classes/ImageFile.html b/docs/classes/ImageFile.html
index c2486c3..3ac1dae 100644
--- a/docs/classes/ImageFile.html
+++ b/docs/classes/ImageFile.html
@@ -1,14 +1,12 @@
-ImageFile | predictionguard Class ImageFile
ImageFile provides access retrieve an image from disk.
- Index
Constructors
Properties
base64
-path
+ImageFile | predictionguard Class ImageFile
ImageFile provides access to retrieve an image from disk.
+ Index
Constructors
Methods
Constructors
constructor
- new
Image File(path): ImageFile constructor constructs an ImageNetwork to use.
+
Constructors
constructor
Properties
Private
base64
base64: null | stringPrivate
path
path: stringMethods
Encode Base64
- Encode
Base64(): Promise<[string, null | Error]> EncodeBase64 reads an image from disk and converts the image to base64
+
Returns ImageFile
\ No newline at end of file
+
diff --git a/docs/classes/ImageNetwork.html b/docs/classes/ImageNetwork.html
index 913e46a..754da87 100644
--- a/docs/classes/ImageNetwork.html
+++ b/docs/classes/ImageNetwork.html
@@ -1,14 +1,12 @@
-ImageNetwork | predictionguard Class ImageNetwork
ImageNetwork provides access retrieve an image over the network.
- Index
Constructors
Properties
base64
-url
+ImageNetwork | predictionguard Class ImageNetwork
ImageNetwork provides access to retrieve an image over the network.
+ Index
Constructors
Methods
Constructors
constructor
- new
Image Network(url): ImageNetwork constructor constructs an ImageNetwork to use.
+
Constructors
constructor
- new
Image Network(url): ImageNetwork constructor constructs an ImageNetwork to use.
Parameters
- url: string
url represents the location of the image.
-
Returns ImageNetwork
Properties
Private
base64
base64: null | stringPrivate
url
url: stringMethods
Encode Base64
- Encode
Base64(): Promise<[string, null | Error]> EncodeBase64 downloads an image from the network and converts the image
+
Returns ImageNetwork
\ No newline at end of file
+
diff --git a/docs/enums/Languages.html b/docs/enums/Languages.html
index 289dcb7..d5f1513 100644
--- a/docs/enums/Languages.html
+++ b/docs/enums/Languages.html
@@ -1,5 +1,5 @@
-Languages | predictionguard Enumeration Languages
Languages represents the set of languages that can be used.
- Index
Enumeration Members
Afrikanns
+Languages | predictionguard Enumeration Languages
Languages represents the set of languages that can be used.
+Enumeration Members
Afrikanns
Afrikanns: "afr"Amharic
Amharic: "amh"Arabic
Arabic: "ara"Armenian
Armenian: "hye"Azerbaijan
Azerbaijan: "aze"Basque
Basque: "eus"Belarusian
Belarusian: "bel"Bengali
Bengali: "ben"Bosnian
Bosnian: "bos"Catalan
Catalan: "cat"Chechen
Chechen: "che"Cherokee
Cherokee: "chr"Chinese
Chinese: "zho"Croatian
Croatian: "hrv"Czech
Czech: "ces"Danish
Danish: "dan"Dutch
Dutch: "nld"English
English: "eng"Estonian
Estonian: "est"Fijian
Fijian: "fij"Filipino
Filipino: "fil"Finnish
Finnish: "fin"French
French: "fra"Galician
Galician: "glg"Georgian
Georgian: "kat"German
German: "deu"Greek
Greek: "ell"Gujarati
Gujarati: "guj"Haitian
Haitian: "hat"Hebrew
Hebrew: "heb"Hindi
Hindi: "hin"Hungarian
Hungarian: "hun"Icelandic
Icelandic: "isl"Indonesian
Indonesian: "ind"Irish
Irish: "gle"Italian
Italian: "ita"Japanese
Japanese: "jpn"Kannada
Kannada: "kan"Kazakh
Kazakh: "kaz"Korean
Korean: "kor"Latvian
Latvian: "lav"Lithuanian
Lithuanian: "lit"Macedonian
Macedonian: "mkd"Malay1
Malay1: "msa"Malay2
Malay2: "zlm"Malayalam
Malayalam: "mal"Maltese
Maltese: "mlt"Marathi
Marathi: "mar"Nepali
Nepali: "nep"Norwegian
Norwegian: "nor"Persian
Persian: "fas"Polish
Polish: "pol"Portuguese
Portuguese: "por"Romanian
Romanian: "ron"Russian
Russian: "rus"Samoan
Samoan: "smo"Serbian
Serbian: "srp"Slavonic
Slavonic: "chu"Slovak
Slovak: "slk"Slovenian
Slovenian: "slv"Spanish
Spanish: "spa"Swahili
Swahili: "swh"Swedish
Swedish: "swe"Tamil
Tamil: "tam"Telugu
Telugu: "tel"Thai
Thai: "tha"Turkish
Turkish: "tur"Ukrainian
Ukrainian: "ukr"Urdu
Urdu: "urd"Vietnamese
Vietnamese: "vie"Welsh
Welsh: "cym"
\ No newline at end of file
+ Enumeration Members
Afrikanns
Afrikanns: "afr"Amharic
Amharic: "amh"Arabic
Arabic: "ara"Armenian
Armenian: "hye"Azerbaijan
Azerbaijan: "aze"Basque
Basque: "eus"Belarusian
Belarusian: "bel"Bengali
Bengali: "ben"Bosnian
Bosnian: "bos"Catalan
Catalan: "cat"Chechen
Chechen: "che"Cherokee
Cherokee: "chr"Chinese
Chinese: "zho"Croatian
Croatian: "hrv"Czech
Czech: "ces"Danish
Danish: "dan"Dutch
Dutch: "nld"English
English: "eng"Estonian
Estonian: "est"Fijian
Fijian: "fij"Filipino
Filipino: "fil"Finnish
Finnish: "fin"French
French: "fra"Galician
Galician: "glg"Georgian
Georgian: "kat"German
German: "deu"Greek
Greek: "ell"Gujarati
Gujarati: "guj"Haitian
Haitian: "hat"Hebrew
Hebrew: "heb"Hindi
Hindi: "hin"Hungarian
Hungarian: "hun"Icelandic
Icelandic: "isl"Indonesian
Indonesian: "ind"Irish
Irish: "gle"Italian
Italian: "ita"Japanese
Japanese: "jpn"Kannada
Kannada: "kan"Kazakh
Kazakh: "kaz"Korean
Korean: "kor"Latvian
Latvian: "lav"Lithuanian
Lithuanian: "lit"Macedonian
Macedonian: "mkd"Malay1
Malay1: "msa"Malay2
Malay2: "zlm"Malayalam
Malayalam: "mal"Maltese
Maltese: "mlt"Marathi
Marathi: "mar"Nepali
Nepali: "nep"Norwegian
Norwegian: "nor"Persian
Persian: "fas"Polish
Polish: "pol"Portuguese
Portuguese: "por"Romanian
Romanian: "ron"Russian
Russian: "rus"Samoan
Samoan: "smo"Serbian
Serbian: "srp"Slavonic
Slavonic: "chu"Slovak
Slovak: "slk"Slovenian
Slovenian: "slv"Spanish
Spanish: "spa"Swahili
Swahili: "swh"Swedish
Swedish: "swe"Tamil
Tamil: "tam"Telugu
Telugu: "tel"Thai
Thai: "tha"Turkish
Turkish: "tur"Ukrainian
Ukrainian: "ukr"Urdu
Urdu: "urd"Vietnamese
Vietnamese: "vie"Welsh
Welsh: "cym"
diff --git a/docs/enums/Models.html b/docs/enums/Models.html
deleted file mode 100644
index 743aea7..0000000
--- a/docs/enums/Models.html
+++ /dev/null
@@ -1,10 +0,0 @@
-Models | predictionguard Enumeration Models
Models represents the set of models that can be used.
- Index
Enumeration Members
Bridgetower Large Itm Mlm Itc
Bridgetower Large Itm Mlm Itc: "bridgetower-large-itm-mlm-itc"Deepseek Coder67BInstruct
Deepseek Coder67BInstruct: "deepseek-coder-6.7b-instruct"Hermes2 Pro Llama38B
Hermes2 Pro Llama38B: "Hermes-2-Pro-Llama-3-8B"Hermes2 Pro Mistral7B
Hermes2 Pro Mistral7B: "Hermes-2-Pro-Mistral-7B"LLama3 Sql Coder8b
LLama3 Sql Coder8b: "llama-3-sqlcoder-8b"Llava157BHF
Llava157BHF: "llava-1.5-7b-hf"Neural Chat7B
Neural Chat7B: "Neural-Chat-7B"Nous Hermes Llama213B
Nous Hermes Llama213B: "Nous-Hermes-Llama-213B"
\ No newline at end of file
diff --git a/docs/enums/PIIs.html b/docs/enums/PIIs.html
index d2656a0..bcf8a45 100644
--- a/docs/enums/PIIs.html
+++ b/docs/enums/PIIs.html
@@ -1,4 +1,4 @@
-PIIs | predictionguard Enumeration PIIs
PIIs represents the set of pii options that can be used.
-
diff --git a/docs/enums/ReplaceMethods.html b/docs/enums/ReplaceMethods.html
index 486318a..7e394f5 100644
--- a/docs/enums/ReplaceMethods.html
+++ b/docs/enums/ReplaceMethods.html
@@ -1,6 +1,6 @@
-ReplaceMethods | predictionguard Enumeration ReplaceMethods
ReplaceMethods represents the set of replace methods that can be used.
- Index
Enumeration Members
diff --git a/docs/enums/Roles.html b/docs/enums/Roles.html
index 5cc393a..6245db9 100644
--- a/docs/enums/Roles.html
+++ b/docs/enums/Roles.html
@@ -1,6 +1,6 @@
-Roles | predictionguard Enumeration Roles
Roles represents the set of roles that a sender can represent themselves
+
Roles | predictionguard
diff --git a/docs/index.html b/docs/index.html
index e952fdd..791b702 100644
--- a/docs/index.html
+++ b/docs/index.html
@@ -1,34 +1,40 @@
-predictionguard predictionguard
Prediction Guard JS Client
+predictionguard predictionguard
Prediction Guard JS Client
Copyright 2024 Prediction Guard
bill@predictionguard.com
-Description
This package provides functionality developed to simplify interfacing with Prediction Guard API in JavaScript.
-Requirements
To access the API, contact us here to get an enterprise access token. You will need this access token to continue.
-Usage
Install Package
-$ npm i predictionguard
-
+Description
This package provides functionality developed to simplify interfacing with Prediction Guard API in JavaScript.
+Requirements
To access the API, contact us here to get an enterprise access token. You will need this access token to continue.
+Usage
Install Package
+$ npm i predictionguard
+
+
Code Example
-import * as pg from 'predictionguard';
const client = new pg.Client('https://api.predictionguard.com', process.env.PGKEY);
async function Chat() {
const input = {
model: pg.Models.NeuralChat7B,
messages: [
{
role: pg.Roles.User,
content: 'How do you feel about the world in general',
},
],
maxTokens: 1000,
temperature: 0.1,
topP: 0.1,
options: {
factuality: true,
toxicity: true,
pii: pg.PIIs.Replace,
piiReplaceMethod: pg.ReplaceMethods.Random,
},
};
var [result, err] = await client.Chat(input);
if (err != null) {
console.log('ERROR:' + err.error);
return;
}
console.log('RESULT:' + result.createdDate() + ': ' + result.model + ': ' + result.choices[0].message.content);
}
Chat();
-
-Take a look at the examples directory for more examples.
-Docs
You can find the SDK and Prediction Guard docs using these links.
-
-
-Getting started
Once you have your API key you can use the makefile
to run curl commands for the different API endpoints.
+
import * as pg from 'predictionguard';
const client = new pg.Client('https://api.predictionguard.com', process.env.PGKEY);
async function Chat() {
const input = {
model: pg.Models.NeuralChat7B,
messages: [
{
role: pg.Roles.User,
content: 'How do you feel about the world in general',
},
],
maxTokens: 1000,
temperature: 0.1,
topP: 0.1,
options: {
factuality: true,
toxicity: true,
pii: pg.PIIs.Replace,
piiReplaceMethod: pg.ReplaceMethods.Random,
},
};
var [result, err] = await client.Chat(input);
if (err != null) {
console.log('ERROR:' + err.error);
return;
}
console.log('RESULT:' + result.createdDate() + ': ' + result.model + ': ' + result.choices[0].message.content);
}
Chat();
+
+
+Take a look at the examples directory for more examples.
+Docs
You can find the SDK and Prediction Guard docs using these links.
+
+
+Getting started
Once you have your API key you can use the makefile
to run curl commands for the different API endpoints.
For example, make curl-injection
will connect to the injection endpoint and return the injection response.
The makefile
also allows you to run the different examples such as make js-injection
to run the JS injection example.
Running The Project
-You will need to node before you can run the project. You can follow this link or use brew which is what I do.
+You will need to install node before you can run the project. You can follow this link or use brew which is what I do.
After you clone the repo and install node, run the install command inside the root of the project folder.
$ make install
+
Then run the test command to make sure everything is working.
$ make test
+
Finally you can try running one of the JS examples.
$ make js-chat
-Licensing
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+
+Licensing
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+
Copyright 2024 Prediction Guard
-
\ No newline at end of file
+
diff --git a/docs/interfaces/Base64Encoder.html b/docs/interfaces/Base64Encoder.html
index aeef8cd..ecf4e5d 100644
--- a/docs/interfaces/Base64Encoder.html
+++ b/docs/interfaces/Base64Encoder.html
@@ -1,4 +1,4 @@
-Base64Encoder | predictionguard Interface Base64Encoder
Index
Methods
diff --git a/docs/interfaces/Chat.html b/docs/interfaces/Chat.html
index 5fd0378..688910c 100644
--- a/docs/interfaces/Chat.html
+++ b/docs/interfaces/Chat.html
@@ -1,15 +1,15 @@
-Chat | predictionguard Interface Chat
Chat represents an object that contains the result for the chat call.
- interface Chat {
choices: ChatChoice[];
created: number;
id: string;
model: Models;
object: string;
createdDate(): Date;
} Index
Properties
choices
+Chat | predictionguard Interface Chat
Chat represents an object that contains the result for the chat call.
+ interface Chat {
choices: ChatChoice[];
created: number;
id: string;
model: string;
object: string;
createdDate(): Date;
}Properties
choices
choices represents the collection of choices to choose from.
-created
created: numbercreated represents the unix timestamp for when the request was
+
Properties
choices
choices represents the collection of choices to choose from.
+created
created: numbercreated represents the unix timestamp for when the request was
received.
-id
id: stringid represents a unique identifier for the result.
-model
model represents the model used for generating the result.
-object
object: stringobject represent the type of the result document.
-Methods
created Date
\ No newline at end of file
+id
id: stringid represents a unique identifier for the result.
+model
model: stringmodel represents the model used for generating the result.
+object
object: stringobject represent the type of the result document.
+
diff --git a/docs/interfaces/ChatChoice.html b/docs/interfaces/ChatChoice.html
index fc5e3e0..bc4e1ec 100644
--- a/docs/interfaces/ChatChoice.html
+++ b/docs/interfaces/ChatChoice.html
@@ -1,10 +1,10 @@
-ChatChoice | predictionguard Interface ChatChoice
ChatChoice represents an object that contains a result choice.
- Index
Properties
index
+ChatChoice | predictionguard Interface ChatChoice
ChatChoice represents an object that contains a result choice.
+Properties
index
index: numberindex represents the index position in the collection for
+
Properties
index
index: numberindex represents the index position in the collection for
this choice.
-message
message represents the message response for this choice.
-status
status: stringstatus represents if the response for this choice was successful
+
message
message represents the message response for this choice.
+status
status: stringstatus represents if the response for this choice was successful
or not.
-
\ No newline at end of file
+
diff --git a/docs/interfaces/ChatInput.html b/docs/interfaces/ChatInput.html
index fa109b2..2ef55b6 100644
--- a/docs/interfaces/ChatInput.html
+++ b/docs/interfaces/ChatInput.html
@@ -1,22 +1,16 @@
-ChatInput | predictionguard Interface ChatInput
ChatInput represents the full potential input options for chat.
- interface ChatInput {
maxTokens: number;
messages: ChatInputMessage[];
model: Models;
options: ChatInputOptions;
temperature: number;
topK: number;
topP: number;
} Index
Properties
max Tokens
+ChatInput | predictionguard Interface ChatInput
ChatInput represents the full potential input options for chat.
+ interface ChatInput {
maxTokens: number;
messages: ChatInputMessage[];
model: string;
options: ChatInputOptions;
temperature: number;
topK: number;
topP: number;
}Properties
max Tokens
max Tokens: numbermaxTokens represents the max number of tokens to return.
-messages
messages represents the set of messages to process.
-model
model represents the model to use. You are restriced to these models:
-DeepseekCoder67BInstruct
-Hermes2ProLlama38B
-Hermes2ProMistral7B
-LLama3SqlCoder8b
-Llava157BHF
-NeuralChat7B
-options
options represents a set of optional parameters.
-temperature
temperature: numbertemperature represents the randomness in GPT's output.
-topK
topK: numbertopK represents the variability of the generated text.
-topP
topP: numbertopP represents the diversity of the generated text.
-
\ No newline at end of file
+ Properties
max Tokens
max Tokens: numbermaxTokens represents the max number of tokens to return.
+messages
messages represents the set of messages to process.
+model
model: stringmodel represents the model to use.
+options
options represents a set of optional parameters.
+temperature
temperature: numbertemperature represents the randomness in GPT's output.
+topK
topK: numbertopK represents the variability of the generated text.
+topP
topP: numbertopP represents the diversity of the generated text.
+
diff --git a/docs/interfaces/ChatInputMessage.html b/docs/interfaces/ChatInputMessage.html
index ce7ff1d..08c08cf 100644
--- a/docs/interfaces/ChatInputMessage.html
+++ b/docs/interfaces/ChatInputMessage.html
@@ -1,6 +1,6 @@
-ChatInputMessage | predictionguard Interface ChatInputMessage
ChatInputMessage represents a role and content related to a chat.
- Index
Properties
content
+ChatInputMessage | predictionguard
\ No newline at end of file
+
diff --git a/docs/interfaces/ChatInputOptions.html b/docs/interfaces/ChatInputOptions.html
index 75f81c5..97573a0 100644
--- a/docs/interfaces/ChatInputOptions.html
+++ b/docs/interfaces/ChatInputOptions.html
@@ -1,14 +1,14 @@
-ChatInputOptions | predictionguard Interface ChatInputOptions
ChatInputOptions represents options for post and preprocessing the input.
- interface ChatInputOptions {
blockPromptInjection: boolean;
factuality: boolean;
pii: PIIs;
piiReplaceMethod: ReplaceMethods;
toxicity: boolean;
} Index
Properties
block Prompt Injection
+ChatInputOptions | predictionguard Interface ChatInputOptions
ChatInputOptions represents options for post and preprocessing the input.
+ interface ChatInputOptions {
blockPromptInjection: boolean;
factuality: boolean;
pii: PIIs;
piiReplaceMethod: ReplaceMethods;
toxicity: boolean;
} Index
Properties
Properties
block Prompt Injection
block Prompt Injection: booleanblockPromptInjection represents the choice to run the
+
Properties
block Prompt Injection
block Prompt Injection: booleanblockPromptInjection represents the choice to run the
blockPromptInjection algorithm.
-factuality
factuality: booleanfactuality represents the choice to run the factuality algorithm.
-pii
pii represents the choice to run the repalce personal information
- algorithm and which one.
-pii Replace Method
piiReplaceMethod represents the method to use for PII.
-toxicity
toxicity: booleantoxicity represents the choice to run the toxicity algorithm.
-
\ No newline at end of file
+factuality
factuality: booleanfactuality represents the choice to run the factuality algorithm.
+pii
+pii represents the choice to run the replace personal information
+algorithm and which one.
+pii Replace Method
piiReplaceMethod represents the method to use for PII.
+toxicity
toxicity: booleantoxicity represents the choice to run the toxicity algorithm.
+
diff --git a/docs/interfaces/ChatMessage.html b/docs/interfaces/ChatMessage.html
index 17bdae4..1d08381 100644
--- a/docs/interfaces/ChatMessage.html
+++ b/docs/interfaces/ChatMessage.html
@@ -1,9 +1,9 @@
-ChatMessage | predictionguard Interface ChatMessage
ChatMessage represents an object that contains the content and a role. It
+
ChatMessage | predictionguard Interface ChatMessage
ChatMessage represents an object that contains the content and a role. It
can be used for input and returned as part of the response.
- Index
Properties
content
+Properties
content
content: stringcontent represents the content of the message.
-output
output: stringoutput represents the output for this message.
-role
role represents the role of the sender (user or assistant).
-
\ No newline at end of file
+
diff --git a/docs/interfaces/ChatSSE.html b/docs/interfaces/ChatSSE.html
index 3d769c2..9b15ba5 100644
--- a/docs/interfaces/ChatSSE.html
+++ b/docs/interfaces/ChatSSE.html
@@ -1,16 +1,16 @@
-ChatSSE | predictionguard Interface ChatSSE
ChatSSE represents an object that contains the result for the chatSSE
+
ChatSSE | predictionguard Interface ChatSSE
ChatSSE represents an object that contains the result for the chatSSE
call.
- interface ChatSSE {
choices: ChatSSEChoice[];
created: number;
id: string;
model: Models;
object: string;
createdDate(): Date;
} Index
Properties
choices
+ interface ChatSSE {
choices: ChatSSEChoice[];
created: number;
id: string;
model: string;
object: string;
createdDate(): Date;
}Properties
choices
choices represents the collection of choices to choose from.
-created
created: numbercreated represents the unix timestamp for when the request was
+
Properties
choices
choices represents the collection of choices to choose from.
+created
created: numbercreated represents the unix timestamp for when the request was
received.
-id
id: stringid represents a unique identifier for the result.
-model
model represents the model used for generating the result.
-object
object: stringobject represent the type of the result document.
-Methods
created Date
\ No newline at end of file
+id
id: stringid represents a unique identifier for the result.
+model
model: stringmodel represents the model used for generating the result.
+object
object: stringobject represent the type of the result document.
+
diff --git a/docs/interfaces/ChatSSEChoice.html b/docs/interfaces/ChatSSEChoice.html
index c76e1b4..ca69417 100644
--- a/docs/interfaces/ChatSSEChoice.html
+++ b/docs/interfaces/ChatSSEChoice.html
@@ -1,15 +1,15 @@
-ChatSSEChoice | predictionguard Interface ChatSSEChoice
ChatSSEChoice represents an object that contains a result choice.
- interface ChatSSEChoice {
delta: ChatSSEDelta;
finish_reason: string;
generated_text: string;
index: number;
logprobs: number;
} Index
Properties
delta
+ChatSSEChoice | predictionguard Interface ChatSSEChoice
ChatSSEChoice represents an object that contains a result choice.
+ interface ChatSSEChoice {
delta: ChatSSEDelta;
finish_reason: string;
generated_text: string;
index: number;
logprobs: number;
} Index
Properties
Properties
delta
delta represents the partial content for this choice.
-finish_ reason
finish_ reason: stringfinish_reason represents the reason the response has finished
+
Properties
delta
delta represents the partial content for this choice.
+finish_ reason
finish_ reason: stringfinish_reason represents the reason the response has finished
which is provided when this is the last choice.
-generated_ text
generated_ text: stringgenerated_text represents the final completed chat response which
+
generated_ text
generated_ text: stringgenerated_text represents the final completed chat response which
is provided when this is the last choice.
-index
index: numberindex represents the index position in the collection for
+
index
index: numberindex represents the index position in the collection for
this choice.
-logprobs
logprobs: numberlogprobs represents the log probabilty of accuracy for this choice.
-
\ No newline at end of file
+logprobs
logprobs: numberlogprobs represents the log probability of accuracy for this choice.
+
diff --git a/docs/interfaces/ChatSSEDelta.html b/docs/interfaces/ChatSSEDelta.html
index 651b9c1..33444a1 100644
--- a/docs/interfaces/ChatSSEDelta.html
+++ b/docs/interfaces/ChatSSEDelta.html
@@ -1,4 +1,4 @@
-ChatSSEDelta | predictionguard
\ No newline at end of file
+ChatSSEDelta | predictionguard
diff --git a/docs/interfaces/ChatSSEInput.html b/docs/interfaces/ChatSSEInput.html
index 6775817..6960afc 100644
--- a/docs/interfaces/ChatSSEInput.html
+++ b/docs/interfaces/ChatSSEInput.html
@@ -1,22 +1,16 @@
-ChatSSEInput | predictionguard Interface ChatSSEInput
ChatSSEInput represents the full potential input options for SSE chat.
- interface ChatSSEInput {
maxTokens: number;
messages: ChatInputMessage[];
model: Models;
onMessage: ((event, err) => void);
temperature: number;
topK: number;
topP: number;
} Index
Properties
max Tokens
+ChatSSEInput | predictionguard Interface ChatSSEInput
ChatSSEInput represents the full potential input options for SSE chat.
+ interface ChatSSEInput {
maxTokens: number;
messages: ChatInputMessage[];
model: string;
onMessage: ((event: null | ChatSSE, err: null | Error) => void);
temperature: number;
topK: number;
topP: number;
}Properties
max Tokens
max Tokens: numbermaxTokens represents the max number of tokens to return.
-messages
messages represents the set of messages to process.
-model
model represents the model to use. You are restriced to these models:
-DeepseekCoder67BInstruct
-Hermes2ProLlama38B
-Hermes2ProMistral7B
-LLama3SqlCoder8b
-Llava157BHF
-NeuralChat7B
-on Message
on Message: ((event, err) => void)onMessage represents a function that will receive the messages.
-temperature
temperature: numbertemperature represents the randomness in GPT's output.
-topK
topK: numbertopK represents the variability of the generated text.
-topP
topP: numbertopP represents the diversity of the generated text.
-
\ No newline at end of file
+ Properties
max Tokens
max Tokens: numbermaxTokens represents the max number of tokens to return.
+messages
messages represents the set of messages to process.
+model
model: stringmodel represents the model to use.
+on Message
onMessage represents a function that will receive the messages.
+temperature
temperature: numbertemperature represents the randomness in GPT's output.
+topK
topK: numbertopK represents the variability of the generated text.
+topP
topP: numbertopP represents the diversity of the generated text.
+
diff --git a/docs/interfaces/ChatVision.html b/docs/interfaces/ChatVision.html
index cd6eec5..c3dbe00 100644
--- a/docs/interfaces/ChatVision.html
+++ b/docs/interfaces/ChatVision.html
@@ -1,15 +1,15 @@
-ChatVision | predictionguard Interface ChatVision
ChatVision represents the result for the vision call.
- interface ChatVision {
choices: ChatVisionChoice[];
created: number;
id: string;
model: Models;
object: string;
createdDate(): Date;
} Index
Properties
choices
+ChatVision | predictionguard Interface ChatVision
ChatVision represents the result for the vision call.
+ interface ChatVision {
choices: ChatVisionChoice[];
created: number;
id: string;
model: string;
object: string;
createdDate(): Date;
}Properties
choices
choices represents the collection of choices to choose from.
-created
created: numbercreated represents the unix timestamp for when the request was
+
Properties
choices
choices represents the collection of choices to choose from.
+created
created: numbercreated represents the unix timestamp for when the request was
received.
-id
id: stringid represents a unique identifier for the result.
-model
model represents the model used for generating the result.
-object
object: stringobject represent the type of the result document.
-Methods
created Date
\ No newline at end of file
+id
id: stringid represents a unique identifier for the result.
+model
model: stringmodel represents the model used for generating the result.
+object
object: stringobject represent the type of the result document.
+
diff --git a/docs/interfaces/ChatVisionChoice.html b/docs/interfaces/ChatVisionChoice.html
index 80ada5c..af2d10a 100644
--- a/docs/interfaces/ChatVisionChoice.html
+++ b/docs/interfaces/ChatVisionChoice.html
@@ -1,10 +1,10 @@
-ChatVisionChoice | predictionguard Interface ChatVisionChoice
ChatVisionChoice represents a choice for the vision call.
- Index
Properties
index
+ChatVisionChoice | predictionguard Interface ChatVisionChoice
ChatVisionChoice represents a choice for the vision call.
+Properties
index
index: numberindex represents the index position in the collection for
+
Properties
index
index: numberindex represents the index position in the collection for
this choice.
-message
message represents a response for this choice.
-status
status: stringstatus represents if the response for this choice was successful
+
message
message represents a response for this choice.
+status
status: stringstatus represents if the response for this choice was successful
or not.
-
\ No newline at end of file
+
diff --git a/docs/interfaces/ChatVisionInput.html b/docs/interfaces/ChatVisionInput.html
index 8b186f6..54ca77a 100644
--- a/docs/interfaces/ChatVisionInput.html
+++ b/docs/interfaces/ChatVisionInput.html
@@ -1,16 +1,18 @@
-ChatVisionInput | predictionguard Interface ChatVisionInput
ChatVisionInput represents the full potential input options for Vision chat.
- interface ChatVisionInput {
image: Base64Encoder;
maxTokens: number;
question: string;
role: Roles;
temperature: number;
topK: number;
topP: number;
} Index
Properties
image
+ChatVisionInput | predictionguard Interface ChatVisionInput
ChatVisionInput represents the full potential input options for Vision chat.
+ interface ChatVisionInput {
image: Base64Encoder;
maxTokens: number;
model: string;
question: string;
role: Roles;
temperature: number;
topK: number;
topP: number;
}Properties
image
image represents an object that knows how to retrieve an image.
-max Tokens
max Tokens: numbermaxTokens represents the max number of tokens to return.
-question
question: stringquestion represents the question about the image.
-role
role represents the role of the sender (user or assistant).
-temperature
temperature: numbertemperature represents the randomness in GPT's output.
-topK
topK: numbertopK represents the variability of the generated text.
-topP
topP: numbertopP represents the diversity of the generated text.
-
\ No newline at end of file
+ Properties
image
image represents an object that knows how to retrieve an image.
+max Tokens
max Tokens: numbermaxTokens represents the max number of tokens to return.
+model
model: stringmodel represents the model to use.
+question
question: stringquestion represents the question about the image.
+role
role represents the role of the sender (user or assistant).
+temperature
temperature: numbertemperature represents the randomness in GPT's output.
+topK
topK: numbertopK represents the variability of the generated text.
+topP
topP: numbertopP represents the diversity of the generated text.
+
diff --git a/docs/interfaces/ChatVisionMessage.html b/docs/interfaces/ChatVisionMessage.html
index f01369a..b1584a7 100644
--- a/docs/interfaces/ChatVisionMessage.html
+++ b/docs/interfaces/ChatVisionMessage.html
@@ -1,8 +1,8 @@
-ChatVisionMessage | predictionguard Interface ChatVisionMessage
ChatVisionMessage represents content for the vision call.
- Index
Properties
content
+ChatVisionMessage | predictionguard Interface ChatVisionMessage
ChatVisionMessage represents content for the vision call.
+Properties
content
content: stringcontent represents the response for this message.
-output
output: stringoutput represents the output for this message.
-role
role represents the role of the sender (user or assistant).
-
\ No newline at end of file
+
diff --git a/docs/interfaces/Completion.html b/docs/interfaces/Completion.html
index 195f282..ff29187 100644
--- a/docs/interfaces/Completion.html
+++ b/docs/interfaces/Completion.html
@@ -1,14 +1,14 @@
-Completion | predictionguard Interface Completion
Completion represents an object that contains the result for the
+
Completion | predictionguard Interface Completion
Completion represents an object that contains the result for the
completion call.
- interface Completion {
choices: CompletionChoice[];
created: number;
id: string;
object: string;
createdDate(): Date;
} Index
Properties
choices
+ interface Completion {
choices: CompletionChoice[];
created: number;
id: string;
object: string;
createdDate(): Date;
} Index
Properties
Methods
Properties
choices
choices represents the collection of choices to choose from.
-created
created: numbercreated represents the unix timestamp for when the request was
+
Properties
choices
choices represents the collection of choices to choose from.
+created
created: numbercreated represents the unix timestamp for when the request was
received.
-id
id: stringid represents a unique identifier for the result.
-object
object: stringobject represent the type of the result document.
-Methods
created Date
\ No newline at end of file
+id
id: stringid represents a unique identifier for the result.
+object
object: stringobject represent the type of the result document.
+
diff --git a/docs/interfaces/CompletionChoice.html b/docs/interfaces/CompletionChoice.html
index 827d439..5080b43 100644
--- a/docs/interfaces/CompletionChoice.html
+++ b/docs/interfaces/CompletionChoice.html
@@ -1,13 +1,13 @@
-CompletionChoice | predictionguard Interface CompletionChoice
Choice represents an object that contains a result choice.
- Index
Properties
index
+CompletionChoice | predictionguard Interface CompletionChoice
Choice represents an object that contains a result choice.
+Properties
index
index: numberindex represents the index position in the collection for
+
Properties
index
index: numberindex represents the index position in the collection for
this choice.
-model
model represents the model used for generating the result for
+
model
model: stringmodel represents the model used for generating the result for
this choice.
-status
status: stringstatus represents if the response for this choice was successful
+
status
status: stringstatus represents if the response for this choice was successful
or not.
-text
text: stringtext represents the generated text for this choice.
-
\ No newline at end of file
+text
text: stringtext represents the generated text for this choice.
+
diff --git a/docs/interfaces/CompletionInput.html b/docs/interfaces/CompletionInput.html
index 1237eb3..6482f9c 100644
--- a/docs/interfaces/CompletionInput.html
+++ b/docs/interfaces/CompletionInput.html
@@ -1,19 +1,14 @@
-CompletionInput | predictionguard Interface CompletionInput
CompletionInput represents the full potential input options for completion.
- interface CompletionInput {
maxTokens: number;
model: Models;
prompt: string;
temperature: number;
topK: number;
topP: number;
} Index
Properties
max Tokens
+CompletionInput | predictionguard Interface CompletionInput
CompletionInput represents the full potential input options for completion.
+ interface CompletionInput {
maxTokens: number;
model: string;
prompt: string;
temperature: number;
topK: number;
topP: number;
}Properties
max Tokens
max Tokens: numbermaxTokens represents the max number of tokens to return.
-model
model represents the model to use. You are restriced to these models:
-DeepseekCoder67BInstruct
-Hermes2ProLlama38B
-Hermes2ProMistral7B
-NeuralChat7B
-NousHermesLlama213B
-prompt
prompt: stringprompt represents the prompt to process.
-temperature
temperature: numbertemperature represents the randomness in GPT's output.
-topK
topK: numbertopK represents the variability of the generated text.
-topP
topP: numbertopP represents the diversity of the generated text.
-
\ No newline at end of file
+ Properties
max Tokens
max Tokens: numbermaxTokens represents the max number of tokens to return.
+model
model: stringmodel represents the model to use.
+prompt
prompt: stringprompt represents the prompt to process.
+temperature
temperature: numbertemperature represents the randomness in GPT's output.
+topK
topK: numbertopK represents the variability of the generated text.
+topP
topP: numbertopP represents the diversity of the generated text.
+
diff --git a/docs/interfaces/Embedding.html b/docs/interfaces/Embedding.html
index 0335418..00ae186 100644
--- a/docs/interfaces/Embedding.html
+++ b/docs/interfaces/Embedding.html
@@ -1,15 +1,15 @@
-Embedding | predictionguard Interface Embedding
Embedding represents the result for the embedding call.
- interface Embedding {
created: number;
data: EmbeddingData[];
id: string;
model: Models;
object: string;
createdDate(): Date;
} Index
Properties
created
+Embedding | predictionguard Interface Embedding
Embedding represents the result for the embedding call.
+ interface Embedding {
created: number;
data: EmbeddingData[];
id: string;
model: string;
object: string;
createdDate(): Date;
}Properties
created
created: numbercreated represents the unix timestamp for when the request was
+
Properties
created
created: numbercreated represents the unix timestamp for when the request was
received.
-data
EmbeddingData represents the collection of vector points.
-id
id: stringid represents a unique identifier for the result.
-model
model represents the model used for generating the result.
-object
object: stringobject represent the type of the result document.
-Methods
created Date
\ No newline at end of file
+data
EmbeddingData represents the collection of vector points.
+id
id: stringid represents a unique identifier for the result.
+model
model: stringmodel represents the model used for generating the result.
+object
object: stringobject represent the type of the result document.
+
diff --git a/docs/interfaces/EmbeddingData.html b/docs/interfaces/EmbeddingData.html
index e9bd7d2..17ca1cd 100644
--- a/docs/interfaces/EmbeddingData.html
+++ b/docs/interfaces/EmbeddingData.html
@@ -1,9 +1,9 @@
-EmbeddingData | predictionguard Interface EmbeddingData
EmbeddingData represents the vector data points.
- Index
Properties
embedding
+EmbeddingData | predictionguard Interface EmbeddingData
EmbeddingData represents the vector data points.
+Properties
embedding
embedding: number[]index
index: numberindex represents the index position in the collection for
+
Properties
embedding
embedding: number[]index
index: numberindex represents the index position in the collection for
this choice.
-status
status: stringstatus represents if the response for this choice was successful
+
status
status: stringstatus represents if the response for this choice was successful
or not.
-
\ No newline at end of file
+
diff --git a/docs/interfaces/EmbeddingInput.html b/docs/interfaces/EmbeddingInput.html
index 5eee34a..524ac54 100644
--- a/docs/interfaces/EmbeddingInput.html
+++ b/docs/interfaces/EmbeddingInput.html
@@ -1,6 +1,6 @@
-EmbeddingInput | predictionguard Interface EmbeddingInput
EmbeddingInput represents the input to generate embeddings.
- Index
Properties
image
+EmbeddingInput | predictionguard
\ No newline at end of file
+
diff --git a/docs/interfaces/Error.html b/docs/interfaces/Error.html
index 31c7364..8e1f5f6 100644
--- a/docs/interfaces/Error.html
+++ b/docs/interfaces/Error.html
@@ -1,4 +1,4 @@
-Error | predictionguard
\ No newline at end of file
+Error | predictionguard
diff --git a/docs/interfaces/Factuality.html b/docs/interfaces/Factuality.html
index 9e0a9c8..8498e67 100644
--- a/docs/interfaces/Factuality.html
+++ b/docs/interfaces/Factuality.html
@@ -1,14 +1,14 @@
-Factuality | predictionguard Interface Factuality
Factuality represents an object that contains the result for the
+
Factuality | predictionguard Interface Factuality
Factuality represents an object that contains the result for the
factuality call.
- interface Factuality {
checks: FactualityCheck[];
created: number;
id: string;
object: string;
createdDate(): Date;
} Index
Properties
checks
+ interface Factuality {
checks: FactualityCheck[];
created: number;
id: string;
object: string;
createdDate(): Date;
} Index
Properties
Methods
Properties
checks
checks represents the collection of checks to choose from.
-created
created: numbercreated represents the unix timestamp for when the request was
+
Properties
checks
checks represents the collection of checks to choose from.
+created
created: numbercreated represents the unix timestamp for when the request was
received.
-id
id: stringid represents a unique identifier for the result.
-object
object: stringobject represent the type of the result document.
-Methods
created Date
\ No newline at end of file
+id
id: stringid represents a unique identifier for the result.
+object
object: stringobject represent the type of the result document.
+
diff --git a/docs/interfaces/FactualityCheck.html b/docs/interfaces/FactualityCheck.html
index 12b87ed..9f17a45 100644
--- a/docs/interfaces/FactualityCheck.html
+++ b/docs/interfaces/FactualityCheck.html
@@ -1,9 +1,9 @@
-FactualityCheck | predictionguard Interface FactualityCheck
FactualityCheck represents an object that contains a check choice.
- Index
Properties
index
+FactualityCheck | predictionguard
\ No newline at end of file
+score
score: numberscore represents the score for this check.
+status
status: stringstatus represents the status for this check.
+
diff --git a/docs/interfaces/Injection.html b/docs/interfaces/Injection.html
index e694b72..56087aa 100644
--- a/docs/interfaces/Injection.html
+++ b/docs/interfaces/Injection.html
@@ -1,14 +1,14 @@
-Injection | predictionguard Interface Injection
Injection represents an object that contains the result for the
+
Injection | predictionguard Interface Injection
Injection represents an object that contains the result for the
injection call.
- interface Injection {
checks: InjectionCheck[];
created: number;
id: string;
object: string;
createdDate(): Date;
} Index
Properties
checks
+ interface Injection {
checks: InjectionCheck[];
created: number;
id: string;
object: string;
createdDate(): Date;
} Index
Properties
Methods
Properties
checks
checks represents the collection of checks to choose from.
-created
created: numbercreated represents the unix timestamp for when the result was
+
Properties
checks
checks represents the collection of checks to choose from.
+created
created: numbercreated represents the unix timestamp for when the result was
received.
-id
id: stringid represents a unique identifier for the result.
-object
object: stringobject represent the type of the result document.
-Methods
created Date
\ No newline at end of file
+id
id: stringid represents a unique identifier for the result.
+object
object: stringobject represent the type of the result document.
+
diff --git a/docs/interfaces/InjectionCheck.html b/docs/interfaces/InjectionCheck.html
index 54b60c7..34071b0 100644
--- a/docs/interfaces/InjectionCheck.html
+++ b/docs/interfaces/InjectionCheck.html
@@ -1,10 +1,10 @@
-InjectionCheck | predictionguard Interface InjectionCheck
InjectionCheck represents an object that contains a check choice.
- Index
Properties
index
+InjectionCheck | predictionguard Interface InjectionCheck
InjectionCheck represents an object that contains a check choice.
+ Index
Properties
Properties
index
index: numberindex represents the index position in the collection for
+
Properties
index
index: numberindex represents the index position in the collection for
this checks.
-probability
probability: numberprobability represents the probability of a potential injection
+
probability
probability: numberprobability represents the probability of a potential injection
attack.
-status
status: stringstatus represents the status for this check.
-
\ No newline at end of file
+status
status: stringstatus represents the status for this check.
+
diff --git a/docs/interfaces/ReplacePII.html b/docs/interfaces/ReplacePII.html
index aa58e61..e348df8 100644
--- a/docs/interfaces/ReplacePII.html
+++ b/docs/interfaces/ReplacePII.html
@@ -1,14 +1,14 @@
-ReplacePII | predictionguard Interface ReplacePII
ReplacePII represents an object that contains the result for the
+
ReplacePII | predictionguard Interface ReplacePII
ReplacePII represents an object that contains the result for the
replacepii call.
- interface ReplacePII {
checks: ReplacePIICheck[];
created: number;
id: string;
object: string;
createdDate(): Date;
} Index
Properties
checks
+ interface ReplacePII {
checks: ReplacePIICheck[];
created: number;
id: string;
object: string;
createdDate(): Date;
} Index
Properties
Methods
Properties
checks
checks represents the collection of checks to choose from.
-created
created: numbercreated represents the unix timestamp for when the request was
+
Properties
checks
checks represents the collection of checks to choose from.
+created
created: numbercreated represents the unix timestamp for when the request was
received.
-id
id: stringid represents a unique identifier for the result.
-object
object: stringobject represent the type of the result document.
-Methods
created Date
\ No newline at end of file
+id
id: stringid represents a unique identifier for the result.
+object
object: stringobject represent the type of the result document.
+
diff --git a/docs/interfaces/ReplacePIICheck.html b/docs/interfaces/ReplacePIICheck.html
index 3b11254..360de86 100644
--- a/docs/interfaces/ReplacePIICheck.html
+++ b/docs/interfaces/ReplacePIICheck.html
@@ -1,9 +1,9 @@
-ReplacePIICheck | predictionguard Interface ReplacePIICheck
ReplacePIICheck represents an object that contains a check choice.
- Index
Properties
index
+ReplacePIICheck | predictionguard
\ No newline at end of file
+new_ prompt
new_ prompt: stringnew_prompt represents the text with replaced personal information.
+status
status: stringstatus represents the status for this check.
+
diff --git a/docs/interfaces/Toxicity.html b/docs/interfaces/Toxicity.html
index cdca08f..052404d 100644
--- a/docs/interfaces/Toxicity.html
+++ b/docs/interfaces/Toxicity.html
@@ -1,14 +1,14 @@
-Toxicity | predictionguard Interface Toxicity
Toxicity represents an object that contains the result for the
+
Toxicity | predictionguard Interface Toxicity
Toxicity represents an object that contains the result for the
toxicity call.
- interface Toxicity {
checks: ToxicityCheck[];
created: number;
id: string;
object: string;
createdDate(): Date;
} Index
Properties
checks
+ interface Toxicity {
checks: ToxicityCheck[];
created: number;
id: string;
object: string;
createdDate(): Date;
} Index
Properties
Methods
Properties
checks
checks represents the collection of checks to choose from.
-created
created: numbercreated represents the unix timestamp for when the request was
+
Properties
checks
checks represents the collection of checks to choose from.
+created
created: numbercreated represents the unix timestamp for when the request was
received.
-id
id: stringid represents a unique identifier for the result.
-object
object: stringobject represent the type of the result document.
-Methods
created Date
\ No newline at end of file
+id
id: stringid represents a unique identifier for the result.
+object
object: stringobject represent the type of the result document.
+
diff --git a/docs/interfaces/ToxicityCheck.html b/docs/interfaces/ToxicityCheck.html
index 3c11b30..47ad771 100644
--- a/docs/interfaces/ToxicityCheck.html
+++ b/docs/interfaces/ToxicityCheck.html
@@ -1,9 +1,9 @@
-ToxicityCheck | predictionguard Interface ToxicityCheck
ToxicityCheck represents an object that contains a check choice.
- Index
Properties
index
+ToxicityCheck | predictionguard
\ No newline at end of file
+score
score: numberscore represents the score for the provided text.
+status
status: stringstatus represents the status for this check.
+
diff --git a/docs/interfaces/Translate.html b/docs/interfaces/Translate.html
index b3fbed9..a7ffc50 100644
--- a/docs/interfaces/Translate.html
+++ b/docs/interfaces/Translate.html
@@ -1,6 +1,6 @@
-Translate | predictionguard Interface Translate
Translate represents an object that contains the result for the
+
Translate | predictionguard Interface Translate
Translate represents an object that contains the result for the
translate call.
- interface Translate {
best_score: number;
best_translation: string;
best_translation_model: string;
created: number;
id: string;
object: string;
translations: Translation[];
createdDate(): Date;
} Index
Properties
interface Translate {
best_score: number;
best_translation: string;
best_translation_model: string;
created: number;
id: string;
object: string;
translations: Translation[];
createdDate(): Date;
} Index
Properties
Methods
Properties
best_ score
best_ score: numberbest_score represents the best score for the best translation.
-best_ translation
best_ translation: stringbest_translation represents the best translation of the input text.
-best_ translation_ model
best_ translation_ model: stringbest_translation_model represents the model used for the best
+
Properties
best_ score
best_ score: numberbest_score represents the best score for the best translation.
+best_ translation
best_ translation: stringbest_translation represents the best translation of the input text.
+best_ translation_ model
best_ translation_ model: stringbest_translation_model represents the model used for the best
translation.
-created
created: numbercreated represents the unix timestamp for when the request was
+
created
created: numbercreated represents the unix timestamp for when the request was
received.
-id
id: stringid represents a unique identifier for the result.
-object
object: stringobject represent the type of the result document.
-translations
translations represents the collection of translations to choose from.
-Methods
created Date
\ No newline at end of file
+id
id: stringid represents a unique identifier for the result.
+object
object: stringobject represent the type of the result document.
+translations
translations represents the collection of translations to choose from.
+
diff --git a/docs/interfaces/Translation.html b/docs/interfaces/Translation.html
index 37c1696..eecffb1 100644
--- a/docs/interfaces/Translation.html
+++ b/docs/interfaces/Translation.html
@@ -1,10 +1,10 @@
-Translation | predictionguard Interface Translation
Translation represents an object that contains a translation choice.
- Index
Properties
model
+Translation | predictionguard Interface Translation
Translation represents an object that contains a translation choice.
+ Index
Properties
Properties
model
model: stringmodel represents the model that was used for this translation.
-score
score: numberscore represents the quality score for this translation.
-status
status: stringstatus represents the status of using the model for this translation.
-translation
translation: stringtranslation represents the translation.
-
\ No newline at end of file
+ Properties
model
model: stringmodel represents the model that was used for this translation.
+score
score: numberscore represents the quality score for this translation.
+status
status: stringstatus represents the status of using the model for this translation.
+translation
translation: stringtranslation represents the translation.
+
diff --git a/docs/modules.html b/docs/modules.html
index d14a8c8..270ee29 100644
--- a/docs/modules.html
+++ b/docs/modules.html
@@ -1,5 +1,4 @@
-predictionguard
diff --git a/examples/chat.js b/examples/chat.js
index d6e21cc..3de3b18 100644
--- a/examples/chat.js
+++ b/examples/chat.js
@@ -4,7 +4,7 @@ const client = new pg.Client('https://api.predictionguard.com', process.env.PGKE
async function Chat() {
const input = {
- model: pg.Models.NeuralChat7B,
+ model: 'Neural-Chat-7B',
messages: [
{
role: pg.Roles.User,
@@ -14,7 +14,7 @@ async function Chat() {
maxTokens: 1000,
temperature: 0.1,
topP: 0.1,
- topK: 50.0,
+ topK: 50,
options: {
factuality: true,
toxicity: true,
diff --git a/examples/chat_sse.js b/examples/chat_sse.js
index 2b6ebe7..4c7e6a9 100644
--- a/examples/chat_sse.js
+++ b/examples/chat_sse.js
@@ -4,7 +4,7 @@ const client = new pg.Client('https://api.predictionguard.com', process.env.PGKE
async function ChatSSE() {
const input = {
- model: pg.Models.NeuralChat7B,
+ model: 'Neural-Chat-7B',
messages: [
{
role: pg.Roles.User,
@@ -14,7 +14,7 @@ async function ChatSSE() {
maxTokens: 1000,
temperature: 0.1,
topP: 0.1,
- topK: 50.0,
+ topK: 50,
onMessage: function (event, err) {
if (err != null) {
if (err.error == 'EOF') {
diff --git a/examples/chat_vision.js b/examples/chat_vision.js
index 929b177..dbde6c7 100644
--- a/examples/chat_vision.js
+++ b/examples/chat_vision.js
@@ -6,16 +6,17 @@ async function ChatVision() {
const image = new pg.ImageNetwork('https://images.ctfassets.net/hrltx12pl8hq/7GlCy7xexnzzrAARg86iUj/f4429bfa8397f81a2429ea003181347f/Autumn_Vectors.jpg');
const input = {
+ model: 'llava-1.5-7b-hf',
role: pg.Roles.User,
question: 'is there a deer in this picture',
image: image,
maxTokens: 1000,
temperature: 0.1,
topP: 0.1,
- topK: 50.0,
+ topK: 50,
};
- var [result, err] = await client.ChatVision(input);
+ var [result, err] = await client.ChatVision('bridgetower-large-itm-mlm-itc', input);
if (err != null) {
console.log('ERROR:' + err.error);
return;
diff --git a/examples/completion.js b/examples/completion.js
index ae93646..d6244a2 100644
--- a/examples/completion.js
+++ b/examples/completion.js
@@ -4,12 +4,12 @@ const client = new pg.Client('https://api.predictionguard.com', process.env.PGKE
async function Completions() {
const input = {
- model: pg.Models.NeuralChat7B,
+ model: 'Neural-Chat-7B',
prompt: 'Will I lose my hair',
maxTokens: 1000,
temperature: 0.1,
topP: 0.1,
- topK: 50.0,
+ topK: 50,
};
var [result, err] = await client.Completion(input);
diff --git a/examples/embedding.js b/examples/embedding.js
index 7ae6343..b0c34c4 100644
--- a/examples/embedding.js
+++ b/examples/embedding.js
@@ -12,7 +12,7 @@ async function Embedding() {
},
];
- var [result, err] = await client.Embedding(input);
+ var [result, err] = await client.Embedding('bridgetower-large-itm-mlm-itc', input);
if (err != null) {
console.log('ERROR:' + err.error);
return;
diff --git a/package-lock.json b/package-lock.json
index 1144b78..75cd6ae 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "predictionguard",
- "version": "0.20.0",
+ "version": "0.21.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "predictionguard",
- "version": "0.20.0",
+ "version": "0.21.0",
"license": "Apache 2.0",
"dependencies": {
"fetch-sse": "^1.0.23",
diff --git a/package.json b/package.json
index 96c55a6..39a476b 100644
--- a/package.json
+++ b/package.json
@@ -2,7 +2,7 @@
"type": "module",
"name": "predictionguard",
"author": "Prediction Guard",
- "version": "0.20.0",
+ "version": "0.21.0",
"license": "Apache 2.0",
"main": "./dist/index.js",
"types": "./dist/index.d.ts",
diff --git a/src/api_client.ts b/src/api_client.ts
index e126cd4..5008a05 100644
--- a/src/api_client.ts
+++ b/src/api_client.ts
@@ -2,7 +2,7 @@ import fetch from 'node-fetch';
import * as sse from 'fetch-sse';
import * as model from './api_model.js';
-const version = '0.20.0';
+const version = '0.21.0';
/** Client provides access the PredictionGuard API. */
export class Client {
@@ -24,16 +24,6 @@ export class Client {
// -------------------------------------------------------------------------
// Chat
- /** Set of models supported by the chat APIs. */
- private chatModels = new Map([
- [model.Models.DeepseekCoder67BInstruct, true],
- [model.Models.Hermes2ProLlama38B, true],
- [model.Models.Hermes2ProMistral7B, true],
- [model.Models.LLama3SqlCoder8b, true],
- [model.Models.Llava157BHF, true],
- [model.Models.NeuralChat7B, true],
- ]);
-
/** Chat generates chat completions based on a conversation history.
*
* @example
@@ -44,7 +34,7 @@ export class Client {
*
* async function Chat() {
* const input = {
- * model: pg.Models.NeuralChat7B,
+ * model: 'Neural-Chat-7B',
* messages: [
* {
* role: pg.Roles.User,
@@ -54,7 +44,7 @@ export class Client {
* maxTokens: 1000,
* temperature: 0.1,
* topP: 0.1,
- * topK: 50.0,
+ * topK: 50,
* options: {
* factuality: true,
* toxicity: true,
@@ -98,10 +88,6 @@ export class Client {
return [zero, {error: 'model is a mandatory input'}];
}
- if (!this.chatModels.get(input.model)) {
- return [zero, {error: 'model specified is not supported'}];
- }
-
if (!input.hasOwnProperty('messages')) {
return [zero, {error: 'messages is a mandatory input'}];
}
@@ -201,7 +187,7 @@ export class Client {
*
* async function ChatSSE() {
* const input = {
- * model: pg.Models.NeuralChat7B,
+ * model: 'Neural-Chat-7B',
* messages: [
* {
* role: pg.Roles.User,
@@ -211,7 +197,7 @@ export class Client {
* maxTokens: 1000,
* temperature: 0.1,
* topP: 0.1,
- * topK: 50.0,
+ * topK: 50,
* onMessage: function (event, err) {
* if (err != null) {
* if (err.error == 'EOF') {
@@ -250,10 +236,6 @@ export class Client {
return {error: 'model is a mandatory input'};
}
- if (!this.chatModels.get(input.model)) {
- return {error: 'model specified is not supported'};
- }
-
if (!input.hasOwnProperty('messages')) {
return {error: 'messages is a mandatory input'};
}
@@ -327,7 +309,7 @@ export class Client {
* maxTokens: 1000,
* temperature: 0.1,
* topP: 0.1,
- * topK: 50.0,
+ * topK: 50,
* };
*
* var [result, err] = await client.ChatVision(input);
@@ -353,7 +335,7 @@ export class Client {
id: '',
object: '',
created: 0,
- model: model.Models.Llava157BHF,
+ model: '',
choices: [],
createdDate: function () {
return new Date(0);
@@ -361,6 +343,10 @@ export class Client {
};
try {
+ if (!input.hasOwnProperty('model')) {
+ return [zero, {error: 'model is a mandatory input'}];
+ }
+
if (!input.hasOwnProperty('role')) {
return [zero, {error: 'role is a mandatory input'}];
}
@@ -379,7 +365,7 @@ export class Client {
}
const m = new Map();
- m.set('model', model.Models.Llava157BHF);
+ m.set('model', input.model);
m.set('messages', [
{
role: input.role,
@@ -435,15 +421,6 @@ export class Client {
// -------------------------------------------------------------------------
// Completion
- /** Set of models supported by the completion API. */
- private completionModels = new Map([
- [model.Models.DeepseekCoder67BInstruct, true],
- [model.Models.Hermes2ProLlama38B, true],
- [model.Models.Hermes2ProMistral7B, true],
- [model.Models.NeuralChat7B, true],
- [model.Models.NousHermesLlama213B, true],
- ]);
-
/** Completion generates text completions based on the provided input.
*
* @example
@@ -454,12 +431,12 @@ export class Client {
*
* async function Completions() {
* const input = {
- * model: pg.Models.NeuralChat7B,
+ * model: 'Neural-Chat-7B',
* prompt: 'Will I lose my hair',
* maxTokens: 1000,
* temperature: 0.1,
* topP: 0.1,
- * topK: 50.0,
+ * topK: 50,
* };
*
* var [result, err] = await client.Completion(input);
@@ -496,10 +473,6 @@ export class Client {
return [zero, {error: 'model is a mandatory input'}];
}
- if (!this.completionModels.get(input.model)) {
- return [zero, {error: 'model specified is not supported'}];
- }
-
if (!input.hasOwnProperty('prompt')) {
return [zero, {error: 'prompt is a mandatory input'}];
}
@@ -563,7 +536,7 @@ export class Client {
* },
* ];
*
- * var [result, err] = await client.Embedding(input);
+ * var [result, err] = await client.Embedding('bridgetower-large-itm-mlm-itc', input);
* if (err != null) {
* console.log('ERROR:' + err.error);
* return;
@@ -577,18 +550,20 @@ export class Client {
* Embedding();
* ```
*
+ * @param {string} model - model to use.
+ *
* @param {model.EmbeddingInput[]} input - input represents a collection of
* text and images to vectorize.
*
* @returns - A Promise with a Embedding object and an Error object if
* the error is not null.
*/
- async Embedding(input: model.EmbeddingInput[]): Promise<[model.Embedding, model.Error | null]> {
+ async Embedding(model: string, input: model.EmbeddingInput[]): Promise<[model.Embedding, model.Error | null]> {
const zero: model.Embedding = {
id: '',
object: '',
created: 0,
- model: model.Models.BridgetowerLargeItmMlmItc,
+ model: '',
data: [],
createdDate: function () {
return new Date(0);
@@ -614,7 +589,7 @@ export class Client {
}
const body = {
- model: model.Models.BridgetowerLargeItmMlmItc,
+ model: model,
input: embeds,
};
diff --git a/src/api_model.ts b/src/api_model.ts
index 9010c25..9b80fdb 100644
--- a/src/api_model.ts
+++ b/src/api_model.ts
@@ -6,18 +6,6 @@ export interface Error {
// -----------------------------------------------------------------------------
-/** Models represents the set of models that can be used. */
-export enum Models {
- BridgetowerLargeItmMlmItc = 'bridgetower-large-itm-mlm-itc',
- DeepseekCoder67BInstruct = 'deepseek-coder-6.7b-instruct',
- Hermes2ProLlama38B = 'Hermes-2-Pro-Llama-3-8B',
- Hermes2ProMistral7B = 'Hermes-2-Pro-Mistral-7B',
- LLama3SqlCoder8b = 'llama-3-sqlcoder-8b',
- Llava157BHF = 'llava-1.5-7b-hf',
- NeuralChat7B = 'Neural-Chat-7B',
- NousHermesLlama213B = 'Nous-Hermes-Llama-213B',
-}
-
/** Roles represents the set of roles that a sender can represent themselves
* as. */
export enum Roles {
@@ -156,15 +144,8 @@ export interface ChatInputOptions {
/** ChatInput represents the full potential input options for chat. */
export interface ChatInput {
- /** model represents the model to use. You are restriced to these models:
- * DeepseekCoder67BInstruct
- * Hermes2ProLlama38B
- * Hermes2ProMistral7B
- * LLama3SqlCoder8b
- * Llava157BHF
- * NeuralChat7B
- */
- model: Models;
+ /** model represents the model to use. */
+ model: string;
/** messages represents the set of messages to process. */
messages: ChatInputMessage[];
@@ -225,7 +206,7 @@ export interface Chat {
created: number;
/** model represents the model used for generating the result. */
- model: Models;
+ model: string;
/** choices represents the collection of choices to choose from. */
choices: ChatChoice[];
@@ -238,15 +219,8 @@ export interface Chat {
/** ChatSSEInput represents the full potential input options for SSE chat. */
export interface ChatSSEInput {
- /** model represents the model to use. You are restriced to these models:
- * DeepseekCoder67BInstruct
- * Hermes2ProLlama38B
- * Hermes2ProMistral7B
- * LLama3SqlCoder8b
- * Llava157BHF
- * NeuralChat7B
- */
- model: Models;
+ /** model represents the model to use. */
+ model: string;
/** messages represents the set of messages to process. */
messages: ChatInputMessage[];
@@ -308,7 +282,7 @@ export interface ChatSSE {
created: number;
/** model represents the model used for generating the result. */
- model: Models;
+ model: string;
/** choices represents the collection of choices to choose from. */
choices: ChatSSEChoice[];
@@ -321,6 +295,9 @@ export interface ChatSSE {
/** ChatVisionInput represents the full potential input options for Vision chat. */
export interface ChatVisionInput {
+ /** model represents the model to use. */
+ model: string;
+
/** role represents the role of the sender (user or assistant). */
role: Roles;
@@ -382,7 +359,7 @@ export interface ChatVision {
created: number;
/** model represents the model used for generating the result. */
- model: Models;
+ model: string;
/** choices represents the collection of choices to choose from. */
choices: ChatVisionChoice[];
@@ -395,14 +372,8 @@ export interface ChatVision {
/** CompletionInput represents the full potential input options for completion. */
export interface CompletionInput {
- /** model represents the model to use. You are restriced to these models:
- * DeepseekCoder67BInstruct
- * Hermes2ProLlama38B
- * Hermes2ProMistral7B
- * NeuralChat7B
- * NousHermesLlama213B
- */
- model: Models;
+ /** model represents the model to use. */
+ model: string;
/** prompt represents the prompt to process. */
prompt: string;
@@ -428,7 +399,7 @@ export interface CompletionChoice {
/** model represents the model used for generating the result for
* this choice. */
- model: Models;
+ model: string;
/** status represents if the response for this choice was successful
* or not. */
@@ -496,7 +467,7 @@ export interface Embedding {
created: number;
/** model represents the model used for generating the result. */
- model: Models;
+ model: string;
/** EmbeddingData represents the collection of vector points. */
data: EmbeddingData[];
diff --git a/test/api_test.js b/test/api_test.js
index 3b95702..9185972 100644
--- a/test/api_test.js
+++ b/test/api_test.js
@@ -270,7 +270,7 @@ async function testChatBasic() {
const client = new pg.Client('http://localhost:8080', 'any key');
const input = {
- model: pg.Models.NeuralChat7B,
+ model: 'Neural-Chat-7B',
messages: [
{
role: pg.Roles.User,
@@ -309,6 +309,7 @@ async function testChatVision() {
};
const input = {
+ model: 'llava-1.5-7b-hf',
role: pg.Roles.User,
question: 'is there a deer in this picture',
image: imageMock,
@@ -332,7 +333,7 @@ async function testChatBadkey() {
const client = new pg.Client('http://localhost:8080', '');
const input = {
- model: pg.Models.NeuralChat7B,
+ model: 'Neural-Chat-7B',
messages: [
{
role: pg.Roles.User,
@@ -388,7 +389,7 @@ async function testCompletionBasic() {
const client = new pg.Client('http://localhost:8080', 'any key');
const input = {
- model: pg.Models.NeuralChat7B,
+ model: 'Neural-Chat-7B',
prompt: 'Will I lose my hair',
maxTokens: 1000,
temperature: 0.1,
@@ -410,7 +411,7 @@ async function testCompletionBadkey() {
const client = new pg.Client('http://localhost:8080', '');
const input = {
- model: pg.Models.NeuralChat7B,
+ model: 'Neural-Chat-7B',
prompt: 'Will I lose my hair',
maxTokens: 1000,
temperature: 0.1,
@@ -463,7 +464,7 @@ async function testEmbeddingBasic() {
},
];
- var [result, err] = await client.Embedding(input);
+ var [result, err] = await client.Embedding('bridgetower-large-itm-mlm-itc', input);
if (err != null) {
assert.fail('ERROR:' + err.error);
}
@@ -490,7 +491,7 @@ async function testEmbeddingBadkey() {
},
];
- var [, err] = await client.Embedding(input);
+ var [, err] = await client.Embedding('bridgetower-large-itm-mlm-itc', input);
if (err == null) {
assert.fail("didn't get an error");
}
Client provides access the PredictionGuard API.
-