From 5998c3866040e03ea951305cd178b6751fe5115a Mon Sep 17 00:00:00 2001
From: Gilad S
Date: Sun, 22 Sep 2024 00:32:19 +0300
Subject: [PATCH] docs: improve documentation

---
 .vitepress/components/BlogEntry/BlogEntry.vue |  2 +-
 .vitepress/components/HomePage/HomePage.vue   | 14 +++++++++--
 .vitepress/config.ts                          | 17 ++++++++++++-
 .vitepress/theme/style.css                    |  4 ++++
 README.md                                     |  2 +-
 docs/blog/blog.data.ts                        |  7 +++++-
 docs/guide/Metal.md                           |  6 +++++
 docs/guide/chat-session.md                    | 24 +++++++++++++++++++
 docs/guide/grammar.md                         |  2 +-
 docs/guide/index.md                           |  3 ++-
 docs/index.md                                 |  2 +-
 package.json                                  |  2 +-
 12 files changed, 75 insertions(+), 10 deletions(-)

diff --git a/.vitepress/components/BlogEntry/BlogEntry.vue b/.vitepress/components/BlogEntry/BlogEntry.vue
index eef01278..bbd5ff61 100644
--- a/.vitepress/components/BlogEntry/BlogEntry.vue
+++ b/.vitepress/components/BlogEntry/BlogEntry.vue
@@ -47,7 +47,7 @@ const dateText = new Date(props.date).toLocaleDateString("en-US", {
         }"
     />
-    <div class="description">{{ props.description }}</div>
+    <div class="description" v-html="props.description"></div>
     <div class="readMore">
         Read more
diff --git a/.vitepress/components/HomePage/HomePage.vue b/.vitepress/components/HomePage/HomePage.vue
index e41e22b0..f377bc00 100644
--- a/.vitepress/components/HomePage/HomePage.vue
+++ b/.vitepress/components/HomePage/HomePage.vue
@@ -244,6 +244,16 @@ getElectronExampleAppDownloadLink()
     }
 }
 
+:global(.VPHome .VPHero .container .main) {
+    &:global(>.name) {
+        font-weight: 701;
+    }
+
+    &:global(>.text) {
+        font-weight: 699;
+    }
+}
+
 :global(html.start-animation) {
     .content {
         transition: opacity 0.5s 0.25s, transform 0.5s 0.25s, translate 0.5s, display 1s ease-in-out;
@@ -292,7 +302,7 @@ getElectronExampleAppDownloadLink()
         }
     }
 
-    &:global(> .text) {
+    &:global(>.text) {
         transition: font-weight 0.5s ease-in-out;
 
         @starting-style {
@@ -301,7 +311,7 @@ getElectronExampleAppDownloadLink()
         }
     }
 
-    &:global(> .tagline) {
+    &:global(>.tagline) {
         transition: transform 0.5s ease-in-out;
 
         @starting-style {
diff --git a/.vitepress/config.ts b/.vitepress/config.ts
index a20a2d26..06c4c244 100644
--- a/.vitepress/config.ts
+++ b/.vitepress/config.ts
@@ -324,7 +324,22 @@ export default defineConfig({
         search: {
             provider: "local",
             options: {
-                detailedView: true
+                detailedView: true,
+                miniSearch: {
+                    searchOptions: {
+                        boostDocument(documentId, term, storedFields) {
+                            const firstTitle = (storedFields?.titles as string[])?.[0];
+                            if (firstTitle?.startsWith("Type Alias: "))
+                                return -0.8;
+                            else if (firstTitle?.startsWith("Class: "))
+                                return -0.9;
+                            else if (firstTitle?.startsWith("Function: "))
+                                return -0.95;
+
+                            return 1;
+                        }
+                    }
+                }
             }
         },
         sidebar: {
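The `boostDocument` hook above down-ranks auto-generated API reference pages (those whose first title starts with "Type Alias: ", "Class: ", or "Function: ") so that handwritten guide pages surface first in the local search. A standalone sketch of the same technique used directly against MiniSearch, with made-up documents, assuming minisearch v6+ (which passes `storedFields` as the third argument):

```ts
import MiniSearch from "minisearch";

const miniSearch = new MiniSearch({
    fields: ["title", "text"],
    storeFields: ["titles"]
});

// made-up documents standing in for a guide page and a generated API page
miniSearch.addAll([
    {id: 1, title: "Using Grammar", text: "force JSON output", titles: ["Using Grammar"]},
    {id: 2, title: "Class: LlamaGrammar", text: "force JSON output", titles: ["Class: LlamaGrammar"]}
]);

const results = miniSearch.search("JSON", {
    boostDocument(documentId, term, storedFields) {
        // a multiplier below zero pushes the page to the bottom of the results
        const firstTitle = (storedFields?.titles as string[] | undefined)?.[0];

        if (firstTitle?.startsWith("Class: "))
            return -0.9;

        return 1;
    }
});

console.log(results.map((result) => result.id)); // the guide page ranks first
```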
diff --git a/.vitepress/theme/style.css b/.vitepress/theme/style.css
index f2492b4d..f4d079c7 100644
--- a/.vitepress/theme/style.css
+++ b/.vitepress/theme/style.css
@@ -547,6 +547,10 @@ html.blog-page .vp-doc h2 {
     border-top: none;
 }
 
+html.blog-page .vp-doc>div>hr:first-of-type {
+    display: none;
+}
+
 /*#VPContent {*/
 /*    background-image: radial-gradient(1200px 380px at 50% 0%, color-mix(in srgb, var(--vp-c-brand-1) 32%, transparent), transparent 64%);*/
 /*}*/
diff --git a/README.md b/README.md
index 63bbe721..2e2417f0 100644
--- a/README.md
+++ b/README.md
@@ -25,7 +25,7 @@
 * A complete suite of everything you need to use LLMs in your projects
 * [Use the CLI to chat with a model without writing any code](#try-it-without-installing)
 * Up-to-date with the latest `llama.cpp`. Download and compile the latest release with a [single CLI command](https://node-llama-cpp.withcat.ai//guide/building-from-source#downloading-a-release)
-* Force a model to generate output in a parseable format, [like JSON](https://node-llama-cpp.withcat.ai/guide/chat-session#json-response), or even force it to [follow a specific JSON schema](https://node-llama-cpp.withcat.ai/guide/chat-session#response-json-schema)
+* Enforce a model to generate output in a parseable format, [like JSON](https://node-llama-cpp.withcat.ai/guide/chat-session#json-response), or even force it to [follow a specific JSON schema](https://node-llama-cpp.withcat.ai/guide/chat-session#response-json-schema)
 * [Provide a model with functions it can call on demand](https://node-llama-cpp.withcat.ai/guide/chat-session#function-calling) to retrieve information or perform actions
 * [Embedding support](https://node-llama-cpp.withcat.ai/guide/embedding)
 * Great developer experience with full TypeScript support, and [complete documentation](https://node-llama-cpp.withcat.ai/guide/)
diff --git a/docs/blog/blog.data.ts b/docs/blog/blog.data.ts
index eafb2aae..7dd44f7c 100644
--- a/docs/blog/blog.data.ts
+++ b/docs/blog/blog.data.ts
@@ -1,5 +1,6 @@
 import {createContentLoader} from "vitepress";
 import {ensureLocalImage} from "../../.vitepress/utils/ensureLocalImage.js";
+import {htmlEscape} from "../../.vitepress/utils/htmlEscape.js";
 
 const loader = {
     async load() {
@@ -17,7 +18,11 @@
             return {
                 title: post.frontmatter.title as string | undefined,
                 date: post.frontmatter.date as string | undefined,
-                description: post.excerpt || post.frontmatter.description as string | undefined,
+                description: post.excerpt || (
+                    (post.frontmatter.description as string | undefined) != null
+                        ? htmlEscape(post.frontmatter.description as string)
+                        : undefined
+                ),
                 link: post.url,
                 image: await getImage(
                     typeof post.frontmatter.image === "string"
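The new `htmlEscape` import refers to a utility that isn't included in this patch. Since plain-text frontmatter descriptions now flow into the same `v-html` sink as the pre-rendered excerpts, they have to be escaped first. A minimal sketch of what such a helper could look like (the actual `.vitepress/utils/htmlEscape.ts` may differ):

```ts
// Hypothetical sketch; the real .vitepress/utils/htmlEscape.ts is not shown in this patch.
export function htmlEscape(text: string): string {
    // escape the characters that carry meaning in HTML, so a plain-text
    // description renders verbatim when injected via v-html
    return text
        .replace(/&/g, "&amp;")
        .replace(/</g, "&lt;")
        .replace(/>/g, "&gt;")
        .replace(/"/g, "&quot;")
        .replace(/'/g, "&#39;");
}
```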
diff --git a/docs/guide/Metal.md b/docs/guide/Metal.md
index 3fbadc25..5798e31b 100644
--- a/docs/guide/Metal.md
+++ b/docs/guide/Metal.md
@@ -8,6 +8,12 @@ and when building from source on macOS on Apple Silicon Macs, Metal support is enabled by default.
 
 `llama.cpp` doesn't support Metal well on Intel Macs, so it is disabled by default on those machines.
 
+<div class="info custom-block">
+
+[Accelerate framework](https://developer.apple.com/accelerate/) is always enabled on Mac.
+
+</div>
+
 ## Toggling Metal Support {#building}
 ### Prerequisites
 * [`cmake-js` dependencies](https://github.com/cmake-js/cmake-js#:~:text=projectRoot/build%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%20%5Bstring%5D-,Requirements%3A,-CMake)
diff --git a/docs/guide/chat-session.md b/docs/guide/chat-session.md
index bb6b994c..3f8c3cb5 100644
--- a/docs/guide/chat-session.md
+++ b/docs/guide/chat-session.md
@@ -534,6 +534,30 @@ console.log("AI: " + res);
 ```
 
 ## Complete User Prompt {#complete-prompt}
+
+<div class="tip custom-block">
+
+You can try this feature in the example Electron app.
+Just type a prompt and see the completion generated by the model.
+
+</div>
+
 You can generate a completion to a given incomplete user prompt and let the model complete it.
 
 The advantage of doing that on the chat session is that it will use the chat history as context for the completion,
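The tip added above points at the prompt-completion feature this section documents. For reference, a sketch of that flow using the section's `session.completePrompt()` API (the model path below is hypothetical):

```ts
import {fileURLToPath} from "url";
import path from "path";
import {getLlama, LlamaChatSession} from "node-llama-cpp";

const __dirname = path.dirname(fileURLToPath(import.meta.url));

const llama = await getLlama();
const model = await llama.loadModel({
    // hypothetical model path
    modelPath: path.join(__dirname, "models", "Meta-Llama-3.1-8B-Instruct.Q4_K_M.gguf")
});
const context = await model.createContext();
const session = new LlamaChatSession({
    contextSequence: context.getSequence()
});

// generate a completion for an unfinished user prompt;
// any existing chat history is used as context for the completion
const completion = await session.completePrompt("Hi there! I'm ", {
    maxTokens: 100
});
console.log("Completion: " + completion);
```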
diff --git a/docs/guide/grammar.md b/docs/guide/grammar.md
index e3370e88..eb3bad1c 100644
--- a/docs/guide/grammar.md
+++ b/docs/guide/grammar.md
@@ -1,5 +1,5 @@
 # Using Grammar
-Use this to force a model to generate response in a specific format of text, like `JSON` for example.
+Use this to enforce a model to generate a response in a specific format of text, like `JSON` for example.
 
 ::: tip NOTE
diff --git a/docs/guide/index.md b/docs/guide/index.md
index 5e31707c..e8007022 100644
--- a/docs/guide/index.md
+++ b/docs/guide/index.md
@@ -39,6 +39,7 @@
 as well as balances the default settings to get the best performance from your hardware.
 No need to manually configure anything.
 
 **Metal:** Enabled by default on Macs with Apple Silicon. If you're using a Mac with an Intel chip, [you can manually enable it](./Metal.md).
+[Accelerate framework](https://developer.apple.com/accelerate/) is always enabled.
 
 **CUDA:** Used by default when support is detected. For more details, see the [CUDA guide](./CUDA.md).
@@ -126,7 +127,7 @@ console.log("AI: " + a2);
 ```
 
 ### Chatbot With JSON Schema {#chatbot-with-json-schema}
 
-To force a model to generate output according to a JSON schema, use [`llama.createGrammarForJsonSchema()`](../api/classes/Llama.md#creategrammarforjsonschema).
+To enforce a model to generate output according to a JSON schema, use [`llama.createGrammarForJsonSchema()`](../api/classes/Llama.md#creategrammarforjsonschema).
 
 It'll force the model to generate output according to the JSON schema you provide, and it'll do it on the text generation level.
diff --git a/docs/index.md b/docs/index.md
index 77d4b747..899cc407 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -42,7 +42,7 @@ features:
     linkText: Learn more
   - icon:
     title: Powerful features
-    details: Force a model to generate output according to a JSON schema, give a model functions it can call on demand, and much more
+    details: Enforce a model to generate output according to a JSON schema, provide a model with functions it can call on demand, and much more
     link: /guide/grammar#json-schema
     linkText: Learn more
---
diff --git a/package.json b/package.json
index 1bb13236..1fd93f43 100644
--- a/package.json
+++ b/package.json
@@ -1,7 +1,7 @@
 {
   "name": "node-llama-cpp",
   "version": "0.1.0",
-  "description": "Run AI models locally on your machine with node.js bindings for llama.cpp. Force a JSON schema on the model output on the generation level",
+  "description": "Run AI models locally on your machine with node.js bindings for llama.cpp. Enforce a JSON schema on the model output on the generation level",
   "main": "./dist/index.js",
   "type": "module",
   "types": "./dist/index.d.ts",
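Several reworded passages above describe enforcing a JSON schema on the model output at the generation level. For reference, a sketch of that flow with the `llama.createGrammarForJsonSchema()` API the guide links to (the model path and schema are illustrative):

```ts
import {fileURLToPath} from "url";
import path from "path";
import {getLlama, LlamaChatSession} from "node-llama-cpp";

const __dirname = path.dirname(fileURLToPath(import.meta.url));

const llama = await getLlama();
const model = await llama.loadModel({
    // hypothetical model path
    modelPath: path.join(__dirname, "models", "Meta-Llama-3.1-8B-Instruct.Q4_K_M.gguf")
});
const context = await model.createContext();
const session = new LlamaChatSession({
    contextSequence: context.getSequence()
});

// the grammar constrains token generation itself,
// so the response is guaranteed to match the schema
const grammar = await llama.createGrammarForJsonSchema({
    type: "object",
    properties: {
        mood: {enum: ["happy", "sad", "neutral"]},
        summary: {type: "string"}
    }
} as const);

const res = await session.prompt("How are you today?", {grammar});
const parsed = grammar.parse(res);

console.log(parsed.mood, parsed.summary);
```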