From 72cd073511481fd32a6eb540f154546ce7bde641 Mon Sep 17 00:00:00 2001
From: lukas
Date: Sat, 16 Sep 2023 20:10:10 +0200
Subject: [PATCH 1/3] print total api costs

---
 gpt_engineer/ai.py   | 21 +++++++++++++++++++++
 gpt_engineer/main.py |  2 ++
 2 files changed, 23 insertions(+)

diff --git a/gpt_engineer/ai.py b/gpt_engineer/ai.py
index da923c3cf7..914cb16d09 100644
--- a/gpt_engineer/ai.py
+++ b/gpt_engineer/ai.py
@@ -271,6 +271,27 @@ def format_token_usage_log(self) -> str:
             result += str(log.total_tokens) + "\n"
         return result
 
+    def usage_cost(self) -> float:
+        """
+        Return the total cost in USD of the api usage.
+
+        Returns
+        -------
+        float
+            Cost in USD.
+        """
+        prompt_price = 0.03
+        completion_price = 0.06
+        if self.model_name == "gpt-3.5-turbo":
+            prompt_price = 0.0015
+            completion_price = 0.002
+
+        result = 0
+        for log in self.token_usage_log:
+            result += log.total_prompt_tokens / 1000 * prompt_price
+            result += log.total_completion_tokens / 1000 * completion_price
+        return result
+
     def num_tokens(self, txt: str) -> int:
         """
         Get the number of tokens in a text.
diff --git a/gpt_engineer/main.py b/gpt_engineer/main.py
index 68ed8807d1..cbe91ec4bd 100644
--- a/gpt_engineer/main.py
+++ b/gpt_engineer/main.py
@@ -96,6 +96,8 @@ def main(
         messages = step(ai, dbs)
         dbs.logs[step.__name__] = AI.serialize_messages(messages)
 
+    print("Total api cost:", ai.usage_cost())
+
     if collect_consent():
         collect_learnings(model, temperature, steps, dbs)
 

From 84da793af06388886072557613c2adf5e4c82334 Mon Sep 17 00:00:00 2001
From: lukas
Date: Sat, 16 Sep 2023 22:04:41 +0200
Subject: [PATCH 2/3] get prices from langchain

---
 gpt_engineer/ai.py | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/gpt_engineer/ai.py b/gpt_engineer/ai.py
index 914cb16d09..429e7f26d0 100644
--- a/gpt_engineer/ai.py
+++ b/gpt_engineer/ai.py
@@ -9,6 +9,7 @@
 import openai
 import tiktoken
 
+from langchain.callbacks.openai_info import MODEL_COST_PER_1K_TOKENS
 from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
 from langchain.chat_models import AzureChatOpenAI, ChatOpenAI
 from langchain.chat_models.base import BaseChatModel
@@ -280,11 +281,8 @@ def usage_cost(self) -> float:
         float
             Cost in USD.
         """
-        prompt_price = 0.03
-        completion_price = 0.06
-        if self.model_name == "gpt-3.5-turbo":
-            prompt_price = 0.0015
-            completion_price = 0.002
+        prompt_price = MODEL_COST_PER_1K_TOKENS[self.model_name]
+        completion_price = MODEL_COST_PER_1K_TOKENS[self.model_name + "-completion"]
 
         result = 0
         for log in self.token_usage_log:

From 074b7435b5a43990f0d9f459147a7419e36aa21e Mon Sep 17 00:00:00 2001
From: lukas
Date: Sun, 17 Sep 2023 11:20:41 +0200
Subject: [PATCH 3/3] add $ sign

---
 gpt_engineer/main.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/gpt_engineer/main.py b/gpt_engineer/main.py
index cbe91ec4bd..a1c88b9444 100644
--- a/gpt_engineer/main.py
+++ b/gpt_engineer/main.py
@@ -96,7 +96,7 @@ def main(
         messages = step(ai, dbs)
         dbs.logs[step.__name__] = AI.serialize_messages(messages)
 
-    print("Total api cost:", ai.usage_cost())
+    print("Total api cost: $ ", ai.usage_cost())
 
     if collect_consent():
         collect_learnings(model, temperature, steps, dbs)
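
Note: the snippet below is a minimal, standalone sketch of the cost calculation these patches introduce, not the project's actual code. It assumes (as the second commit does) that langchain's MODEL_COST_PER_1K_TOKENS maps "<model>" to the prompt price per 1K tokens and "<model>-completion" to the completion price; the TokenUsage dataclass here is a hypothetical stand-in for the entries in AI.token_usage_log.

# Sketch of the per-model cost lookup added by the patch series.
# TokenUsage is a hypothetical stand-in for the project's token usage
# log entries; only the two token-count fields used below are modeled.
from dataclasses import dataclass

from langchain.callbacks.openai_info import MODEL_COST_PER_1K_TOKENS


@dataclass
class TokenUsage:
    total_prompt_tokens: int
    total_completion_tokens: int


def usage_cost(model_name: str, token_usage_log: list[TokenUsage]) -> float:
    """Return the total cost in USD for the logged token usage."""
    # Prompt and completion tokens are priced separately per 1K tokens.
    prompt_price = MODEL_COST_PER_1K_TOKENS[model_name]
    completion_price = MODEL_COST_PER_1K_TOKENS[model_name + "-completion"]

    cost = 0.0
    for log in token_usage_log:
        cost += log.total_prompt_tokens / 1000 * prompt_price
        cost += log.total_completion_tokens / 1000 * completion_price
    return cost


if __name__ == "__main__":
    # Example: 2,000 prompt tokens and 500 completion tokens on gpt-4 at
    # the 2023 list prices (0.03 / 0.06 per 1K, the defaults hardcoded in
    # the first commit) cost 2 * 0.03 + 0.5 * 0.06 = 0.09 USD.
    log = [TokenUsage(total_prompt_tokens=2000, total_completion_tokens=500)]
    print(f"Total API cost: ${usage_cost('gpt-4', log):.4f}")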