Skip to content

Commit

Permalink
README: update examples (#955)
Browse files Browse the repository at this point in the history
  • Loading branch information
gary149 authored Oct 7, 2024
1 parent 95d8fbf commit 67fbf7a
Show file tree
Hide file tree
Showing 2 changed files with 46 additions and 47 deletions.
70 changes: 37 additions & 33 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -27,20 +27,24 @@ await uploadFile({
}
});

// Use hosted inference

await inference.translation({
model: 't5-base',
inputs: 'My name is Wolfgang and I live in Berlin'
})
// Use Inference API

await inference.chatCompletion({
model: "meta-llama/Llama-3.1-8B-Instruct",
messages: [
{
role: "user",
content: "Hello, nice to meet you!",
},
],
max_tokens: 512,
temperature: 0.5,
});

await inference.textToImage({
model: 'stabilityai/stable-diffusion-2',
inputs: 'award winning high resolution photo of a giant tortoise/((ladybird)) hybrid, [trending on artstation]',
parameters: {
negative_prompt: 'blurry',
}
})
model: "black-forest-labs/FLUX.1-dev",
inputs: "a picture of a green bird",
});

// and much more…
```
Expand Down Expand Up @@ -123,33 +127,33 @@ const inference = new HfInference(HF_TOKEN);

// Chat completion API
const out = await inference.chatCompletion({
model: "mistralai/Mistral-7B-Instruct-v0.2",
messages: [{ role: "user", content: "Complete this sentence with words: one plus one is equal " }],
max_tokens: 100
model: "meta-llama/Llama-3.1-8B-Instruct",
messages: [{ role: "user", content: "Hello, nice to meet you!" }],
max_tokens: 512
});
console.log(out.choices[0].message);

// Streaming chat completion API
for await (const chunk of inference.chatCompletionStream({
model: "mistralai/Mistral-7B-Instruct-v0.2",
messages: [{ role: "user", content: "Complete this sentence with words: one plus one is equal " }],
max_tokens: 100
model: "meta-llama/Llama-3.1-8B-Instruct",
messages: [{ role: "user", content: "Hello, nice to meet you!" }],
max_tokens: 512
})) {
console.log(chunk.choices[0].delta.content);
}

// You can also omit "model" to use the recommended model for the task
await inference.translation({
model: 't5-base',
inputs: 'My name is Wolfgang and I live in Amsterdam'
})
await hf.translation({
inputs: "My name is Wolfgang and I live in Amsterdam",
parameters: {
src_lang: "en",
tgt_lang: "fr",
},
});

await inference.textToImage({
model: 'stabilityai/stable-diffusion-2',
inputs: 'award winning high resolution photo of a giant tortoise/((ladybird)) hybrid, [trending on artstation]',
parameters: {
negative_prompt: 'blurry',
}
model: 'black-forest-labs/FLUX.1-dev',
inputs: 'a picture of a green bird',
})

await inference.imageToText({
Expand All @@ -162,13 +166,13 @@ const gpt2 = inference.endpoint('https://xyz.eu-west-1.aws.endpoints.huggingface
const { generated_text } = await gpt2.textGeneration({inputs: 'The answer to the universe is'});

//Chat Completion
const mistal = inference.endpoint(
"https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"
const llamaEndpoint = inference.endpoint(
"https://api-inference.huggingface.co/models/meta-llama/Llama-3.1-8B-Instruct"
);
const out = await mistal.chatCompletion({
model: "mistralai/Mistral-7B-Instruct-v0.2",
messages: [{ role: "user", content: "Complete this sentence with words: one plus one is equal " }],
max_tokens: 100,
const out = await llamaEndpoint.chatCompletion({
model: "meta-llama/Llama-3.1-8B-Instruct",
messages: [{ role: "user", content: "Hello, nice to meet you!" }],
max_tokens: 512,
});
console.log(out.choices[0].message);
```
Expand Down
23 changes: 9 additions & 14 deletions packages/inference/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -91,23 +91,21 @@ Using the `chatCompletion` method, you can generate text with models compatible
```typescript
// Non-streaming API
const out = await hf.chatCompletion({
model: "mistralai/Mistral-7B-Instruct-v0.2",
messages: [{ role: "user", content: "Complete this sentence with words: one plus one is equal " }],
max_tokens: 500,
model: "meta-llama/Llama-3.1-8B-Instruct",
messages: [{ role: "user", content: "Hello, nice to meet you!" }],
max_tokens: 512,
temperature: 0.1,
seed: 0,
});

// Streaming API
let out = "";
for await (const chunk of hf.chatCompletionStream({
model: "mistralai/Mistral-7B-Instruct-v0.2",
model: "meta-llama/Llama-3.1-8B-Instruct",
messages: [
{ role: "user", content: "Complete the equation 1+1= ,just the answer" },
{ role: "user", content: "Can you help me solve an equation?" },
],
max_tokens: 500,
max_tokens: 512,
temperature: 0.1,
seed: 0,
})) {
if (chunk.choices && chunk.choices.length > 0) {
out += chunk.choices[0].delta.content;
Expand Down Expand Up @@ -396,11 +394,8 @@ Creates an image from a text prompt.

```typescript
await hf.textToImage({
inputs: 'award winning high resolution photo of a giant tortoise/((ladybird)) hybrid, [trending on artstation]',
model: 'stabilityai/stable-diffusion-2',
parameters: {
negative_prompt: 'blurry',
}
model: 'black-forest-labs/FLUX.1-dev',
inputs: 'a picture of a green bird'
})
```

Expand Down Expand Up @@ -583,7 +578,7 @@ const { generated_text } = await gpt2.textGeneration({inputs: 'The answer to the

// Chat Completion Example
const ep = hf.endpoint(
"https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"
"https://api-inference.huggingface.co/models/meta-llama/Llama-3.1-8B-Instruct"
);
const stream = ep.chatCompletionStream({
model: "tgi",
Expand Down

0 comments on commit 67fbf7a

Please sign in to comment.