From 1806ba5f95f01bcf9079f5631922774738383668 Mon Sep 17 00:00:00 2001 From: Marco Minerva Date: Wed, 6 Nov 2024 10:51:37 +0100 Subject: [PATCH] Add new models and update token properties Updated README.md to list new models (`o1-preview` and `o1-mini`) and revised configuration examples to use `MaxCompletionTokens`. Updated appsettings.json to reflect the new `MaxCompletionTokens` property in configuration examples. Added constants for new models in OpenAIChatGptModels.cs with descriptions and token support details. --- README.md | 4 ++++ samples/ChatGptConsole/appsettings.json | 2 +- .../appsettings.json | 2 +- samples/ChatGptStreamConsole/appsettings.json | 1 + src/ChatGptNet/Models/OpenAIChatGptModels.cs | 18 ++++++++++++++++++ 5 files changed, 25 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index f02f0f3..bc5a4c7 100644 --- a/README.md +++ b/README.md @@ -36,6 +36,7 @@ builder.Services.AddChatGpt(options => options.DefaultParameters = new ChatGptParameters { MaxTokens = 800, + //MaxCompletionTokens = 800, // o1 series models support this property instead of MaxTokens Temperature = 0.7 }; }); @@ -85,6 +86,8 @@ Currently available models are: - gpt-4-turbo - gpt-4o - gpt-4o-mini +- o1-preview +- o1-mini They have fixed names, available in the [OpenAIChatGptModels.cs file](https://github.com/marcominerva/ChatGptNet/blob/master/src/ChatGptNet/Models/OpenAIChatGptModels.cs). @@ -163,6 +166,7 @@ The configuration can be automatically read from [IConfiguration](https://learn. 
// "Temperature": 0.8, // "TopP": 1, // "MaxTokens": 500, + // "MaxCompletionTokens": null, // o1 series models support this property instead of MaxTokens // "PresencePenalty": 0, // "FrequencyPenalty": 0, // "ResponseFormat": { "Type": "text" }, // Allowed values for Type: text (default) or json_object diff --git a/samples/ChatGptConsole/appsettings.json b/samples/ChatGptConsole/appsettings.json index 132256b..bf70f22 100644 --- a/samples/ChatGptConsole/appsettings.json +++ b/samples/ChatGptConsole/appsettings.json @@ -17,7 +17,7 @@ // "Temperature": 0.8, // "TopP": 1, // "MaxTokens": 500, - // "MaxCompletionTokens": null, // o1 series models supports this property instead of MaxTokens + // "MaxCompletionTokens": null, // o1 series models support this property instead of MaxTokens // "PresencePenalty": 0, // "FrequencyPenalty": 0, // "ResponseFormat": { "Type": "text" }, // Allowed values for Type: text (default) or json_object diff --git a/samples/ChatGptFunctionCallingConsole/appsettings.json b/samples/ChatGptFunctionCallingConsole/appsettings.json index 132256b..bf70f22 100644 --- a/samples/ChatGptFunctionCallingConsole/appsettings.json +++ b/samples/ChatGptFunctionCallingConsole/appsettings.json @@ -17,7 +17,7 @@ // "Temperature": 0.8, // "TopP": 1, // "MaxTokens": 500, - // "MaxCompletionTokens": null, // o1 series models supports this property instead of MaxTokens + // "MaxCompletionTokens": null, // o1 series models support this property instead of MaxTokens // "PresencePenalty": 0, // "FrequencyPenalty": 0, // "ResponseFormat": { "Type": "text" }, // Allowed values for Type: text (default) or json_object diff --git a/samples/ChatGptStreamConsole/appsettings.json b/samples/ChatGptStreamConsole/appsettings.json index c58b74d..7988164 100644 --- a/samples/ChatGptStreamConsole/appsettings.json +++ b/samples/ChatGptStreamConsole/appsettings.json @@ -17,6 +17,7 @@ // "Temperature": 0.8, // "TopP": 1, // "MaxTokens": 500, + // "MaxCompletionTokens": null, // o1 
series models support this property instead of MaxTokens // "PresencePenalty": 0, // "FrequencyPenalty": 0, // "ResponseFormat": { "Type": "text" }, // Allowed values for Type: text (default) or json_object diff --git a/src/ChatGptNet/Models/OpenAIChatGptModels.cs b/src/ChatGptNet/Models/OpenAIChatGptModels.cs index 0bc2878..94856b7 100644 --- a/src/ChatGptNet/Models/OpenAIChatGptModels.cs +++ b/src/ChatGptNet/Models/OpenAIChatGptModels.cs @@ -69,4 +69,22 @@ public static class OpenAIChatGptModels /// See GPT-4 for more information. /// public const string Gpt4_o_mini = "gpt-4o-mini"; + + /// + /// Reasoning model designed to solve hard problems across domains. + /// + /// + /// This model supports 128.000 tokens and returns a maximum of 32.768 output tokens. + /// See o1-preview and o1-mini for more information. + /// + public const string O1_preview = "o1-preview"; + + /// + /// Faster and cheaper reasoning model particularly good at coding, math, and science. + /// + /// + /// This model supports 128.000 tokens and returns a maximum of 65.536 output tokens. + /// See o1-preview and o1-mini for more information. + /// + public const string O1_mini = "o1-mini"; } \ No newline at end of file