diff --git a/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/0_get_models.py b/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/0_get_models.py
new file mode 100644
index 00000000000..e15eb71718f
--- /dev/null
+++ b/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/0_get_models.py
@@ -0,0 +1,61 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+import os
+
+import boto3
+
+from utils.custom_logging import setup_custom_logger
+from utils.timeit import timeit
+
+
+class BedrockModelService:
+    """
+    A class to interact with the Amazon Bedrock service, specifically for operations
+    related to foundation models.
+    """
+
+    def __init__(self):
+        """
+        Initializes the BedrockModelService with a boto3 client for Bedrock and a custom logger.
+        """
+        self.bedrock_client = boto3.client(service_name="bedrock")
+        self.logger = setup_custom_logger(os.path.basename(__file__))
+
+    @timeit
+    def get_models(self):
+        """
+        Fetches a list of foundation models from the Bedrock AI service and logs their IDs.
+
+        This method uses the `boto3` library to interact with the Bedrock AI service,
+        retrieving a list of foundation models. Each model's ID is then logged using
+        a custom logger. The method is decorated with `@timeit` to measure its execution time.
+
+        Exception Handling:
+            Catches and logs exceptions that may occur during the interaction with the Bedrock service.
+
+        Logging:
+            Logs the total number of models found and each model's ID at DEBUG level. If no models are found
+            or an exception occurs, appropriate warnings or errors are logged.
+        """
+        try:
+            # Request a list of foundation models from Bedrock
+            model_list = self.bedrock_client.list_foundation_models()
+
+            # Extract model summaries from the response
+            model_summaries = model_list.get("modelSummaries")
+            if model_summaries is not None:
+                self.logger.info(f"Found models: {len(model_summaries)}")
+                # Log each model's ID
+                for model in model_summaries:
+                    self.logger.debug(model["modelId"])
+            else:
+                self.logger.warning("No model summaries found in the Bedrock response.")
+        except Exception as e:
+            # Log any exceptions that occur during the process
+            self.logger.error(
+                f"Failed to retrieve models from Bedrock: {e}", exc_info=True
+            )
+
+
+bedrock_service = BedrockModelService()
+bedrock_service.get_models()
diff --git a/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/0_try_claude3.py b/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/0_try_claude3.py
new file mode 100644
index 00000000000..6bac4d93069
--- /dev/null
+++ b/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/0_try_claude3.py
@@ -0,0 +1,224 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+import os
+
+import boto3
+
+from utils.custom_logging import setup_custom_logger
+from utils.timeit import timeit
+
+
+class BedrockAIConverter:
+    """
+    A class that provides methods for converting between images and text, and generating images from text,
+    using generative AI models hosted on Amazon Bedrock.
+    """
+
+    def __init__(self):
+        """
+        Initializes the BedrockAIConverter with a BedrockRuntime client and a custom logger.
+        """
+        self.bedrock_runtime_client = boto3.client("bedrock-runtime")
+        self.logger = setup_custom_logger(os.path.basename(__file__))
+
+    @timeit
+    def img_to_txt(self):
+        """
+        Converts an image to text by sending the image to a generative AI model
+        hosted on Bedrock.
Reads an image file, encodes
+        it in base64, and sends
+        it to the model with a prompt. Logs and returns the model's response.
+
+        Returns:
+            str: The text generated by the model based on the image.
+
+        Raises:
+            Exception: If the process fails at any point.
+        """
+        try:
+            import base64
+            import json
+            import sys
+
+            sys.path.append("../data/resources")
+            with open("../data/resources/entry.jpeg", "rb") as image_file:
+                encoded_string = base64.b64encode(image_file.read())
+                base64_string = encoded_string.decode("utf-8")
+
+            prompt = """
+            ######
+            Describe the site in the photo with as much detail as you can.
+            Do not mention an image, as it will confuse later prompting in this chain.
+            Just provide a description of the site with no mention of any image.
+            ######
+            """
+            self.logger.info(f"PROMPT:\n{prompt}\n")
+
+            payload = {
+                "modelId": "anthropic.claude-3-sonnet-20240229-v1:0",
+                "contentType": "application/json",
+                "accept": "application/json",
+                "body": {
+                    "anthropic_version": "bedrock-2023-05-31",
+                    "max_tokens": 1000,
+                    "messages": [
+                        {
+                            "role": "user",
+                            "content": [
+                                {
+                                    "type": "image",
+                                    "source": {
+                                        "type": "base64",
+                                        "media_type": "image/jpeg",  # entry.jpeg is a JPEG; "image/png" was incorrect
+                                        "data": base64_string,
+                                    },
+                                },
+                                {
+                                    "type": "text",
+                                    "text": prompt,
+                                },
+                            ],
+                        }
+                    ],
+                },
+            }
+
+            body_bytes = json.dumps(payload["body"]).encode("utf-8")
+            response = self.bedrock_runtime_client.invoke_model(
+                body=body_bytes,
+                contentType=payload["contentType"],
+                accept=payload["accept"],
+                modelId=payload["modelId"],
+            )
+
+            response_body = response["body"].read().decode("utf-8")
+            data = json.loads(response_body)
+            text = data["content"][0]["text"]
+            self.logger.warning(text)
+            return text
+        except Exception as e:
+            self.logger.error(f"Failed to convert image to text: {e}")
+            raise
+
+    @timeit
+    def txt_to_txt(self, text):
+        """
+        Invokes a text-to-text generative AI model to generate an idea based on the given text description.
+        Logs and returns the model's generated text.
+
+        Args:
+            text (str): The input text description to base the generative model's response on.
+
+        Returns:
+            str: The text generated by the model.
+
+        Raises:
+            Exception: If the process fails at any point.
+        """
+        try:
+            import json
+
+            prompt = f"""
+            ######
+            You are a skilled event planner with a knack for coming
+            up with low-cost high tech improvements to event spaces,
+            particularly for tech conferences.
+
+            Based on the following description, describe a single thrilling
+            design improvement that would heighten the experience at the
+            rvatech/ AI & Data Summit 2024, which is a day-long event
+            where attendees will pass through this site in high
+            volumes. Something small, nerdy, tech-oriented, and interactive.
+
+            Update the starter description provided in such a way that
+            your new improvement idea has been implemented. How would it look in the
+            site description provided?
+
+            Starter Description: {text}
+
+            Updated description:
+            ######
+            """
+            self.logger.info(f"PROMPT:\n{prompt}\n")
+
+            response = self.bedrock_runtime_client.invoke_model(
+                modelId="anthropic.claude-3-sonnet-20240229-v1:0",
+                body=json.dumps(
+                    {
+                        "anthropic_version": "bedrock-2023-05-31",
+                        "max_tokens": 1024,
+                        "messages": [
+                            {
+                                "role": "user",
+                                "content": [{"type": "text", "text": prompt}],
+                            }
+                        ],
+                    }
+                ),
+            )
+
+            result = json.loads(response.get("body").read())
+            self.logger.critical(response)
+            output_list = result.get("content", [])
+            text = output_list[0]["text"]
+            self.logger.warning(text)
+            return text
+        except Exception as e:
+            self.logger.error(f"Failed to generate text from text: {e}")
+            raise
+
+    @timeit
+    def txt_to_img(self, text):
+        """
+        Converts given text to an image by sending the text to a generative AI model
+        that produces images. Decodes and saves the response image.
+
+        Args:
+            text (str): The text description to convert to an image.
+
+        Raises:
+            Exception: If the process fails at any point.
+        """
+        try:
+            import json
+            from base64 import b64decode
+            from io import BytesIO
+
+            from PIL import Image
+
+            body = json.dumps(
+                {
+                    "text_prompts": [{"text": text}],
+                    "cfg_scale": 6,
+                    "seed": 10,
+                    "steps": 50,
+                }
+            )
+            self.logger.info("PROMPT:\n" + f"\n{text}\n")
+            modelId = "stability.stable-diffusion-xl"
+            accept = "application/json"
+            contentType = "application/json"
+            response = self.bedrock_runtime_client.invoke_model(
+                body=body, modelId=modelId, accept=accept, contentType=contentType
+            )
+            response = json.loads(response.get("body").read())
+            images = response.get("artifacts")
+            image = Image.open(BytesIO(b64decode(images[0].get("base64"))))
+            image.save("2_try_claude3.png")
+        except Exception as e:
+            self.logger.error(f"Failed to convert text to image: {e}")
+            raise
+
+
+converter = BedrockAIConverter()
+description = converter.img_to_txt()
+design_idea = converter.txt_to_txt(description)
+converter.txt_to_img(
+    f"""
+    #####
+    You are a whimsical artist with big ideas.
+
+    Generate a new detailed drawing showcasing the following site:
+    {design_idea}
+    ######
+    """
+)
diff --git a/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/README.md b/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/README.md
new file mode 100644
index 00000000000..131e234f15b
--- /dev/null
+++ b/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/README.md
@@ -0,0 +1,24 @@
+# Running a RAG-enhanced chatbot on Bedrock using Claude 3, Knowledge Base, and LangChain
+This directory contains code used for a live demo. It should be run sequentially, as follows:
+1. 0_get_models.py
+2. 0_try_claude3.py
+3. fordgpt_v1.py
+4. fordgpt_v2.py
+5. fordgpt_v3.py
+
+Note: FordGPTv2-3 (step 4-5) require AWS data stores containing certain information that is not included in this repository.
+
+### Usage
+This is reference code that should not be copied directly into production without review.
+
+Running foundation models costs money. This demo presumes you have billing alarms configured and an understanding of Amazon Bedrock pricing.
+
+## About FordGPT
+![fordgpt.jpg](fordgpt.jpg)
+
+# Requirements
+Speaks fluently on:
+* Popular companies
+* Meetup groups & events
+* Best coffee shops for coding
+* Fail-safe job hunting resources
diff --git a/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/fordgpt.jpg b/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/fordgpt.jpg
new file mode 100644
index 00000000000..b127c921af5
Binary files /dev/null and b/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/fordgpt.jpg differ
diff --git a/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/fordgpt_v1.py b/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/fordgpt_v1.py
new file mode 100644
index 00000000000..c384991f1fb
--- /dev/null
+++ b/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/fordgpt_v1.py
@@ -0,0 +1,100 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+"""
+This example is part of a larger live demo.
+"""
+import os
+
+import boto3
+
+from utils.custom_logging import setup_custom_logger
+from utils.timeit import timeit
+
+
+class FordGPTv1:
+    """
+    A class that simulates conversational responses from Ford Prior, a fictional character
+    known for his background in cloud engineering and DevOps practices. It utilizes the
+    LangChain library for dynamic prompt construction and AWS Boto3 for accessing the
+    Bedrock runtime AI model. The conversation continues until the user says 'bye'.
+    """
+
+    def __init__(self):
+        """
+        Initializes FordGPTv1 with a Bedrock runtime client and a custom logger.
+ """ + self.bedrock_runtime_client = boto3.client(service_name="bedrock-runtime") + self.logger = setup_custom_logger(os.path.basename(__file__)) + + @timeit + def reply_no_rag(self): + """ + Generates and logs replies based on user input using the Ford Prior persona in a conversational context. + The conversation continues until the user inputs 'bye'. Each user input is processed to generate + a contextually relevant and snarky reply that reflects Ford's persona. + + Exception Handling: + Captures and logs any exceptions, terminating the conversation on error. + + User Interaction: + Maintains a loop for conversational interaction with the user until 'bye' is mentioned. + """ + import json + + from langchain_core.prompts import PromptTemplate + + multi_var_prompt = PromptTemplate( + input_variables=["input"], + template=""" + Imagine you are Ford Prior, a seasoned cloud and DevOps engineer with a + rich background in cloud engineering, DevOps practices, and software development. + + Your response should reflect Ford's style: extremely concise, insightful, and + very direct with a side of snark. Only help if asked; otherwise be cheeky + but truthful. + + If you are asked a question directly, ask 1 question to gain context, + then dive in confidently with a local tip for getting into + the Richmond tech scene based on your context and knowledge. + + Do not self refer or admit you are a bot. You are Ford Prior. + + User: {input} + + How would you reply? 
No intro; just reply as you would: + """, + ) + + input_text = input("You: ") + while "bye" not in input_text.lower(): + try: + prompt = multi_var_prompt.format(input=input_text) + response = self.bedrock_runtime_client.invoke_model( + modelId="anthropic.claude-3-sonnet-20240229-v1:0", + body=json.dumps( + { + "anthropic_version": "bedrock-2023-05-31", + "max_tokens": 500, + "messages": [ + { + "role": "user", + "content": [{"type": "text", "text": prompt}], + } + ], + } + ), + ) + bytes_content = response["body"].read() + data = json.loads(bytes_content.decode("utf-8")) + text_contents = [ + item["text"] for item in data["content"] if item["type"] == "text" + ] + self.logger.warning(text_contents[0]) + input_text = input("You: ") + except Exception as e: + self.logger.error(f"Error during conversation: {e}") + break + + +bot = FordGPTv1() +bot.reply_no_rag() diff --git a/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/fordgpt_v2.py b/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/fordgpt_v2.py new file mode 100644 index 00000000000..977836668ac --- /dev/null +++ b/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/fordgpt_v2.py @@ -0,0 +1,121 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +import os + +import boto3 +from langchain_community.chat_models import BedrockChat + +from utils.custom_logging import setup_custom_logger +from utils.timeit import timeit + + +class FordGPTv2: + """ + A class encapsulating the functionality to run a conversational application + simulating interactions as Ford Prior, using context from a resume loaded from + Amazon Simple Storage Service (Amazon S3). + + It leverages the LangChain library for managing conversation flows and the AWS Boto3 + client for interacting with the Amazon Bedrock runtime. 
+ """ + + def __init__(self): + """ + Initializes the conversational app with a custom logger, a Amazon Bedrock runtime client, + and sets up the LangChain's BedrockChat model for conversation handling. + """ + self.logger = setup_custom_logger(os.path.basename(__file__)) + self.boto3_bedrock = boto3.client(service_name="bedrock-runtime") + self.llm = BedrockChat( + model_id="anthropic.claude-3-sonnet-20240229-v1:0", + client=self.boto3_bedrock, + ) + + @timeit + def run_app(self): + """ + Starts the conversational application, loading contextual data from a document + in Amazon S3, and engages in a conversation based on dynamically formatted prompts and + user inputs. + + The conversation continues until "bye" is included in a user's reply, integrating + context from Ford Prior's resume throughout the interaction. + + Exception Handling: + Captures and logs any exceptions, indicating failure to run the application. + """ + from langchain.chains import ConversationChain + from langchain.memory import ConversationBufferMemory + from langchain_community.document_loaders import S3FileLoader + from langchain_core.prompts import PromptTemplate + + try: + loader = S3FileLoader("bedrock-training-ford-gpt", "Profile.pdf") + resume = loader.load() + + template = PromptTemplate( + input_variables=["resume", "input_text"], + template=""" + Imagine you are Ford Prior, a seasoned cloud and DevOps engineer with a + rich background in cloud engineering, DevOps practices, and software development. + + Here is Ford's work history: {resume} + + Your response should reflect Ford's style: extremely concise, insightful, and + very direct with a side of snark. Only help if asked; otherwise, be cheeky + but truthful. + + If you are asked a question directly, ask one question to gain context, + then dive in confidently with a local tip for getting into + the Richmond tech scene based on your context and knowledge. + + Do not self refer or admit you are a bot. You are Ford Prior. 
+ + User: {input_text} + + How would you reply? No intro; just reply as you would: + """, + ) + + prompt = template.format( + resume=resume, input_text="Introduce yourself in a single sentence." + ) + + conversation = ConversationChain( + llm=self.llm, + verbose=False, + memory=ConversationBufferMemory(return_messages=True), + ) + initial_prediction = conversation.predict(input=prompt) + self.logger.warning(initial_prediction) + + self.reply_with_document(template, "You:", conversation, resume) + except Exception as e: + self.logger.error(f"Application failed to run: {e}") + raise + + def reply_with_document(self, template, reply_text, conversation, resume): + """ + Facilitates a conversation using dynamically generated prompts, incorporating user input + and document context. Logs responses until "bye" is detected in user input. + + Args: + template (PromptTemplate): The template for generating conversation prompts. + reply_text (str): Initial text to prompt user input. + conversation (ConversationChain): The conversation handler. + resume (str): Contextual document content to include in prompts. + """ + reply = input(reply_text) + while "bye" not in reply.lower(): + try: + prompt = template.format(resume=resume, input_text=reply) + prediction = conversation.predict(input=prompt) + self.logger.warning(prediction) + reply = input("You:") + except Exception as e: + self.logger.error(f"Error during conversation: {e}") + break + + +app = FordGPTv2() +app.run_app() diff --git a/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/fordgpt_v3.py b/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/fordgpt_v3.py new file mode 100644 index 00000000000..c35d1ba4eae --- /dev/null +++ b/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/fordgpt_v3.py @@ -0,0 +1,112 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# SPDX-License-Identifier: Apache-2.0
+import os
+
+from langchain_community.chat_models import BedrockChat
+
+from utils.custom_logging import setup_custom_logger
+from utils.timeit import timeit
+
+
+class FordGPTv3:
+    """
+    A conversational QA application simulating responses as Ford Prior, using a
+    RetrievalQA model integrated with a knowledge base and a large language model.
+    It processes user inputs in an interactive session, utilizing Ford's resume and
+    user queries to generate contextually relevant answers.
+    """
+
+    def __init__(self):
+        """
+        Initializes the QA application with necessary components, including a custom logger,
+        the BedrockChat model for interaction with a large language model, and identifiers for
+        knowledge base and document retrieval.
+        """
+        self.logger = setup_custom_logger(os.path.basename(__file__))
+        self.model_id = "anthropic.claude-3-sonnet-20240229-v1:0"
+        self.knowledge_base_id = "TB1WKZHMYK"
+        self.llm = BedrockChat(
+            model_id=self.model_id,
+            region_name="us-east-1",
+        )
+
+    @timeit
+    def get_kb_answer(self, prompt, qa):
+        """
+        Retrieves an answer from the RetrievalQA model based on the provided prompt.
+
+        Args:
+            prompt (str): The user's prompt or question.
+            qa (RetrievalQA): An instance of the RetrievalQA model for answering queries.
+
+        Returns:
+            str: The generated response based on the model and knowledge base retrieval.
+        """
+        try:
+            response = qa.invoke(prompt)
+            return response["result"]
+        except Exception as e:
+            self.logger.error(f"Failed to retrieve knowledge base answer: {e}")
+            return "Sorry, I encountered an issue processing your request."
+
+    def run_app(self):
+        """
+        Executes the main application loop, initiating a conversational interface that
+        continuously processes user inputs until "bye" is detected, using Ford Prior's
+        persona and resume for context.
+        """
+        from langchain.chains import RetrievalQA
+        from langchain_community.retrievers import AmazonKnowledgeBasesRetriever
+        from langchain_core.prompts import PromptTemplate
+
+        try:
+            retriever = AmazonKnowledgeBasesRetriever(
+                knowledge_base_id=self.knowledge_base_id,
+                region_name="us-east-1",
+                retrieval_config={"vectorSearchConfiguration": {"numberOfResults": 1}},
+            )
+            qa = RetrievalQA.from_chain_type(
+                llm=self.llm, retriever=retriever, return_source_documents=False
+            )
+
+            # loader = S3FileLoader("bedrock-training-ford-gpt", "Profile.pdf")
+            # resume = loader.load()
+            resume = "See file in context."
+
+            multi_var_prompt = PromptTemplate(
+                input_variables=["input", "resume"],
+                template="""
+                Imagine you are Ford Prior, a seasoned cloud and DevOps engineer with a
+                rich background in cloud engineering, DevOps practices, and software development.
+
+                His work experience is here: {resume}
+
+                Your response should reflect Ford's style: extremely concise, insightful, and
+                very direct with a side of snark. Only help if asked; otherwise, be cheeky
+                but truthful.
+
+                User: {input}
+
+                How would you reply? No intro; just reply as you would:
+                """,
+            )
+
+            input_text = input("You: ")
+            prompt = multi_var_prompt.format(input=input_text, resume=resume)
+
+            initial_response = self.get_kb_answer(
+                f"{prompt}\nIntroduce yourself in a sentence.", qa
+            )
+            self.logger.warning(initial_response)
+            reply = input("\nYou: ")
+
+            while "bye" not in reply.lower():
+                response = self.get_kb_answer(reply, qa)
+                self.logger.warning(response)
+                reply = input("\nYou: ")
+        except Exception as e:
+            self.logger.error(f"Application failed to run: {e}")
+
+
+app = FordGPTv3()
+app.run_app()
diff --git a/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/requirements.txt b/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/requirements.txt
new file mode 100644
index 00000000000..2c83fceabda
--- /dev/null
+++ b/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/requirements.txt
@@ -0,0 +1,9 @@
+# brew install poppler
+# brew install tesseract
+unstructured
+boto3
+langchain
+unstructured[pdf]
+langchain-community
+pytz
+anthropic
\ No newline at end of file
diff --git a/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/utils/colors.py b/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/utils/colors.py
new file mode 100644
index 00000000000..09ad5f31a9a
--- /dev/null
+++ b/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/utils/colors.py
@@ -0,0 +1,16 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+
+
+def colorprint(text, color):
+    colors = {
+        "red": "\033[91m",
+        "green": "\033[92m",
+        "yellow": "\033[93m",
+        "blue": "\033[94m",
+        "magenta": "\033[95m",
+        "cyan": "\033[96m",
+        "white": "\033[97m",
+        "end": "\033[0m",
+    }
+    print(colors[color] + text + colors["end"])
diff --git a/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/utils/custom_logging.py b/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/utils/custom_logging.py
new file mode 100644
index 00000000000..449692daead
--- /dev/null
+++ b/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/utils/custom_logging.py
@@ -0,0 +1,40 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+import logging
+
+# ANSI color codes
+COLOR_CODES = {
+    "DEBUG": "\033[94m",  # Blue
+    "INFO": "\033[97m",  # White
+    "WARNING": "\033[93m",  # Yellow
+    "ERROR": "\033[91m",  # Red
+    "CRITICAL": "\033[95m",  # Purple
+    "RESET": "\033[0m",  # Reset to default
+}
+
+
+# Custom Formatter
+class ColoredFormatter(logging.Formatter):
+    def format(self, record):
+        level_name = record.levelname
+        message = logging.Formatter.format(self, record)
+        return COLOR_CODES.get(level_name, "") + message + COLOR_CODES["RESET"]
+
+
+# Customizing logging handler to use the colored formatter
+def setup_custom_logger(name):
+    # Create a logger
+    logger = logging.getLogger(name)
+    logger.setLevel(logging.DEBUG)  # Setting to debug to catch all logs
+
+    # Creating and setting the custom formatter
+    formatter = ColoredFormatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+
+    # Creating a stream handler (console output) and setting the custom formatter
+    console_handler = logging.StreamHandler()
+    console_handler.setFormatter(formatter)
+
+    # Adding the handler to the logger
+    logger.addHandler(console_handler)
+
+    return logger
diff --git a/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/utils/timeit.py b/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/utils/timeit.py
new file mode 100644
index 00000000000..aa31683a2b4
--- /dev/null
+++ b/python/example_code/bedrock-runtime/models/anthropic/claude3_chatbot_demo/utils/timeit.py
@@ -0,0 +1,25 @@
+# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+import functools
+import os
+import sys
+import time
+
+sys.path.append("..")
+
+from utils.custom_logging import setup_custom_logger
+
+logger = setup_custom_logger(os.path.basename(__file__))
+
+
+def timeit(f):
+    # functools.wraps preserves f.__name__ / __doc__ on the decorated function
+    @functools.wraps(f)
+    def wrapper(*args, **kwargs):
+        start_time = time.time()
+        result = f(*args, **kwargs)
+        end_time = time.time()
+        elapsed_time = end_time - start_time
+        # Timing is informational, not an error condition (was logger.error)
+        logger.info(f"Function {f.__name__} executed in {elapsed_time:.5f} seconds.")
+        return result
+
+    return wrapper