From 2c0286e41122154b248da8aec3271a301e0c330a Mon Sep 17 00:00:00 2001 From: Zamil Majdy Date: Thu, 17 Oct 2024 16:51:10 +0300 Subject: [PATCH 1/6] feat(backend): Add credit for Jina/Search & LLM blocks (#8361) --- .../backend/backend/blocks/llm.py | 36 +++++++++---------- .../backend/backend/data/credit.py | 5 +++ 2 files changed, 23 insertions(+), 18 deletions(-) diff --git a/autogpt_platform/backend/backend/blocks/llm.py b/autogpt_platform/backend/backend/blocks/llm.py index f38b5f5da72d..a55dbdf10565 100644 --- a/autogpt_platform/backend/backend/blocks/llm.py +++ b/autogpt_platform/backend/backend/blocks/llm.py @@ -96,25 +96,25 @@ def cost_factor(self) -> int: MODEL_METADATA = { - LlmModel.O1_PREVIEW: ModelMetadata("openai", 32000, cost_factor=60), - LlmModel.O1_MINI: ModelMetadata("openai", 62000, cost_factor=30), - LlmModel.GPT4O_MINI: ModelMetadata("openai", 128000, cost_factor=10), - LlmModel.GPT4O: ModelMetadata("openai", 128000, cost_factor=12), - LlmModel.GPT4_TURBO: ModelMetadata("openai", 128000, cost_factor=11), - LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385, cost_factor=8), - LlmModel.CLAUDE_3_5_SONNET: ModelMetadata("anthropic", 200000, cost_factor=14), - LlmModel.CLAUDE_3_HAIKU: ModelMetadata("anthropic", 200000, cost_factor=13), - LlmModel.LLAMA3_8B: ModelMetadata("groq", 8192, cost_factor=6), - LlmModel.LLAMA3_70B: ModelMetadata("groq", 8192, cost_factor=9), - LlmModel.MIXTRAL_8X7B: ModelMetadata("groq", 32768, cost_factor=7), - LlmModel.GEMMA_7B: ModelMetadata("groq", 8192, cost_factor=6), - LlmModel.GEMMA2_9B: ModelMetadata("groq", 8192, cost_factor=7), - LlmModel.LLAMA3_1_405B: ModelMetadata("groq", 8192, cost_factor=10), + LlmModel.O1_PREVIEW: ModelMetadata("openai", 32000, cost_factor=16), + LlmModel.O1_MINI: ModelMetadata("openai", 62000, cost_factor=4), + LlmModel.GPT4O_MINI: ModelMetadata("openai", 128000, cost_factor=1), + LlmModel.GPT4O: ModelMetadata("openai", 128000, cost_factor=3), + LlmModel.GPT4_TURBO: ModelMetadata("openai", 128000, cost_factor=10), + LlmModel.GPT3_5_TURBO: ModelMetadata("openai", 16385, cost_factor=1), + LlmModel.CLAUDE_3_5_SONNET: ModelMetadata("anthropic", 200000, cost_factor=4), + LlmModel.CLAUDE_3_HAIKU: ModelMetadata("anthropic", 200000, cost_factor=1), + LlmModel.LLAMA3_8B: ModelMetadata("groq", 8192, cost_factor=1), + LlmModel.LLAMA3_70B: ModelMetadata("groq", 8192, cost_factor=1), + LlmModel.MIXTRAL_8X7B: ModelMetadata("groq", 32768, cost_factor=1), + LlmModel.GEMMA_7B: ModelMetadata("groq", 8192, cost_factor=1), + LlmModel.GEMMA2_9B: ModelMetadata("groq", 8192, cost_factor=1), + LlmModel.LLAMA3_1_405B: ModelMetadata("groq", 8192, cost_factor=1), # Limited to 16k during preview - LlmModel.LLAMA3_1_70B: ModelMetadata("groq", 131072, cost_factor=15), - LlmModel.LLAMA3_1_8B: ModelMetadata("groq", 131072, cost_factor=13), - LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata("ollama", 8192, cost_factor=7), - LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata("ollama", 8192, cost_factor=11), + LlmModel.LLAMA3_1_70B: ModelMetadata("groq", 131072, cost_factor=1), + LlmModel.LLAMA3_1_8B: ModelMetadata("groq", 131072, cost_factor=1), + LlmModel.OLLAMA_LLAMA3_8B: ModelMetadata("ollama", 8192, cost_factor=1), + LlmModel.OLLAMA_LLAMA3_405B: ModelMetadata("ollama", 8192, cost_factor=1), } for model in LlmModel: diff --git a/autogpt_platform/backend/backend/data/credit.py b/autogpt_platform/backend/backend/data/credit.py index e1fccb42897f..5581a7854226 100644 --- a/autogpt_platform/backend/backend/data/credit.py +++ 
b/autogpt_platform/backend/backend/data/credit.py @@ -17,6 +17,7 @@ AITextSummarizerBlock, LlmModel, ) +from backend.blocks.search import ExtractWebsiteContentBlock, SearchTheWebBlock from backend.blocks.talking_head import CreateTalkingAvatarVideoBlock from backend.data.block import Block, BlockInput, get_block from backend.util.settings import Config @@ -74,6 +75,10 @@ def __init__( CreateTalkingAvatarVideoBlock: [ BlockCost(cost_amount=15, cost_filter={"api_key": None}) ], + SearchTheWebBlock: [BlockCost(cost_amount=1)], + ExtractWebsiteContentBlock: [ + BlockCost(cost_amount=1, cost_filter={"raw_content": False}) + ], } From 5d4d2486da1d74863a4f41b4ab57fcd9e2134d38 Mon Sep 17 00:00:00 2001 From: Reinier van der Leer Date: Thu, 17 Oct 2024 18:15:15 +0200 Subject: [PATCH 2/6] ci: Enforce `dev` as base branch for development (#8369) * Create repo-pr-enforce-base-branch.yml * fix quotes * test * fix github token * fix trigger and CLI config * change back trigger because otherwise I can't test it * fix the fix * fix repo selection * fix perms? * fix quotes and newlines escaping in message * Update repo-pr-enforce-base-branch.yml * grrr escape sequences in bash * test * clean up --------- Co-authored-by: Aarushi <50577581+aarushik93@users.noreply.github.com> --- .../workflows/repo-pr-enforce-base-branch.yml | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 .github/workflows/repo-pr-enforce-base-branch.yml diff --git a/.github/workflows/repo-pr-enforce-base-branch.yml b/.github/workflows/repo-pr-enforce-base-branch.yml new file mode 100644 index 000000000000..3d4bd9096a52 --- /dev/null +++ b/.github/workflows/repo-pr-enforce-base-branch.yml @@ -0,0 +1,21 @@ +name: Repo - Enforce dev as base branch +on: + pull_request_target: + branches: [ master ] + types: [ opened ] + +jobs: + check_pr_target: + runs-on: ubuntu-latest + permissions: + pull-requests: write + steps: + - name: Check if PR is from dev or hotfix + if: ${{ !(startsWith(github.event.pull_request.head.ref, 'hotfix/') || github.event.pull_request.head.ref == 'dev') }} + run: | + gh pr comment ${{ github.event.number }} --repo "$REPO" \ + --body $'This PR targets the `master` branch but does not come from `dev` or a `hotfix/*` branch.\n\nAutomatically setting the base branch to `dev`.' + gh pr edit ${{ github.event.number }} --base dev --repo "$REPO" + env: + GITHUB_TOKEN: ${{ github.token }} + REPO: ${{ github.repository }} From 7f6354caaee4f1fed045f2b60b4eaba42db367ae Mon Sep 17 00:00:00 2001 From: Kushal Agrawal <98145879+kushal34712@users.noreply.github.com> Date: Thu, 17 Oct 2024 21:56:35 +0530 Subject: [PATCH 3/6] Update README.md (#8319) * Update README.md * Update README.md --------- Co-authored-by: Aarushi <50577581+aarushik93@users.noreply.github.com> --- README.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/README.md b/README.md index 2371fa39f8d0..722da4223019 100644 --- a/README.md +++ b/README.md @@ -158,6 +158,8 @@ To maintain a uniform standard and ensure seamless compatibility with many curre --- +## Stars stats +

@@ -167,3 +169,10 @@ To maintain a uniform standard and ensure seamless compatibility with many curre

+ + +## ⚡ Contributors + + + Contributors + From 26b1bca03391172f1bc5bfc6d94aaf2010c7edf3 Mon Sep 17 00:00:00 2001 From: Zamil Majdy Date: Fri, 18 Oct 2024 06:22:05 +0300 Subject: [PATCH 4/6] refactor(backend): Make block fields consistently use SchemaField (#8360) --- .../backend/backend/blocks/__init__.py | 11 +++ .../blocks/ai_shortform_video_block.py | 15 ++-- .../backend/backend/blocks/basic.py | 21 ++--- .../backend/backend/blocks/block.py | 13 ++- .../backend/backend/blocks/csv.py | 50 ++++++++--- .../backend/backend/blocks/discord.py | 21 +++-- .../backend/backend/blocks/email_block.py | 8 +- .../backend/backend/blocks/http.py | 27 ++++-- .../backend/backend/blocks/ideogram.py | 6 -- .../backend/backend/blocks/llm.py | 81 ++++++++++++++---- .../backend/backend/blocks/medium.py | 15 +++- .../backend/backend/blocks/reddit.py | 22 ++--- .../backend/backend/blocks/search.py | 47 +++++++---- .../backend/backend/blocks/talking_head.py | 3 +- .../backend/backend/blocks/text.py | 52 +++++++----- .../backend/backend/blocks/time_blocks.py | 83 ++++++++++++++----- .../backend/backend/blocks/youtube.py | 7 +- .../backend/test/util/test_service.py | 6 +- 18 files changed, 338 insertions(+), 150 deletions(-) diff --git a/autogpt_platform/backend/backend/blocks/__init__.py b/autogpt_platform/backend/backend/blocks/__init__.py index 1fd85aef4630..940956a20b6f 100644 --- a/autogpt_platform/backend/backend/blocks/__init__.py +++ b/autogpt_platform/backend/backend/blocks/__init__.py @@ -69,6 +69,17 @@ def all_subclasses(clz): f"{block.name} `error` field in output_schema must be a string" ) + # Make sure all fields in input_schema and output_schema are annotated and has a value + for field_name, field in [*input_schema.items(), *output_schema.items()]: + if field.annotation is None: + raise ValueError( + f"{block.name} has a field {field_name} that is not annotated" + ) + if field.json_schema_extra is None: + raise ValueError( + f"{block.name} has a field {field_name} not defined as SchemaField" + ) + for field in block.input_schema.model_fields.values(): if field.annotation is bool and field.default not in (True, False): raise ValueError(f"{block.name} has a boolean field with no default value") diff --git a/autogpt_platform/backend/backend/blocks/ai_shortform_video_block.py b/autogpt_platform/backend/backend/blocks/ai_shortform_video_block.py index 127bb3ae8b4a..3fe92950c199 100644 --- a/autogpt_platform/backend/backend/blocks/ai_shortform_video_block.py +++ b/autogpt_platform/backend/backend/blocks/ai_shortform_video_block.py @@ -3,7 +3,6 @@ from enum import Enum import requests -from pydantic import Field from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import BlockSecret, SchemaField, SecretField @@ -129,9 +128,13 @@ class Input(BlockSchema): description="""1. Use short and punctuated sentences\n\n2. Use linebreaks to create a new clip\n\n3. Text outside of brackets is spoken by the AI, and [text between brackets] will be used to guide the visual generation. 
For example, [close-up of a cat] will show a close-up of a cat.""", placeholder="[close-up of a cat] Meow!", ) - ratio: str = Field(description="Aspect ratio of the video", default="9 / 16") - resolution: str = Field(description="Resolution of the video", default="720p") - frame_rate: int = Field(description="Frame rate of the video", default=60) + ratio: str = SchemaField( + description="Aspect ratio of the video", default="9 / 16" + ) + resolution: str = SchemaField( + description="Resolution of the video", default="720p" + ) + frame_rate: int = SchemaField(description="Frame rate of the video", default=60) generation_preset: GenerationPreset = SchemaField( description="Generation preset for visual style - only effects AI generated visuals", default=GenerationPreset.LEONARDO, @@ -154,8 +157,8 @@ class Input(BlockSchema): ) class Output(BlockSchema): - video_url: str = Field(description="The URL of the created video") - error: str = Field(description="Error message if the request failed") + video_url: str = SchemaField(description="The URL of the created video") + error: str = SchemaField(description="Error message if the request failed") def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/basic.py b/autogpt_platform/backend/backend/blocks/basic.py index 60992e0f4591..391d6b615aad 100644 --- a/autogpt_platform/backend/backend/blocks/basic.py +++ b/autogpt_platform/backend/backend/blocks/basic.py @@ -2,7 +2,6 @@ from typing import Any, List from jinja2 import BaseLoader, Environment -from pydantic import Field from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema, BlockType from backend.data.model import SchemaField @@ -19,18 +18,18 @@ class StoreValueBlock(Block): """ class Input(BlockSchema): - input: Any = Field( + input: Any = SchemaField( description="Trigger the block to produce the output. " "The value is only used when `data` is None." ) - data: Any = Field( + data: Any = SchemaField( description="The constant data to be retained in the block. 
" "This value is passed as `output`.", default=None, ) class Output(BlockSchema): - output: Any + output: Any = SchemaField(description="The stored data retained in the block.") def __init__(self): super().__init__( @@ -56,10 +55,10 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: class PrintToConsoleBlock(Block): class Input(BlockSchema): - text: str + text: str = SchemaField(description="The text to print to the console.") class Output(BlockSchema): - status: str + status: str = SchemaField(description="The status of the print operation.") def __init__(self): super().__init__( @@ -79,12 +78,14 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: class FindInDictionaryBlock(Block): class Input(BlockSchema): - input: Any = Field(description="Dictionary to lookup from") - key: str | int = Field(description="Key to lookup in the dictionary") + input: Any = SchemaField(description="Dictionary to lookup from") + key: str | int = SchemaField(description="Key to lookup in the dictionary") class Output(BlockSchema): - output: Any = Field(description="Value found for the given key") - missing: Any = Field(description="Value of the input that missing the key") + output: Any = SchemaField(description="Value found for the given key") + missing: Any = SchemaField( + description="Value of the input that missing the key" + ) def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/block.py b/autogpt_platform/backend/backend/blocks/block.py index a4bf8f6ac524..01e8af7238ea 100644 --- a/autogpt_platform/backend/backend/blocks/block.py +++ b/autogpt_platform/backend/backend/blocks/block.py @@ -3,6 +3,7 @@ from typing import Type from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField class BlockInstallationBlock(Block): @@ -15,11 +16,17 @@ class BlockInstallationBlock(Block): """ class Input(BlockSchema): - code: str + code: str = SchemaField( + description="Python code of the block to be installed", + ) class Output(BlockSchema): - success: str - error: str + success: str = SchemaField( + description="Success message if the block is installed successfully", + ) + error: str = SchemaField( + description="Error message if the block installation fails", + ) def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/csv.py b/autogpt_platform/backend/backend/blocks/csv.py index b53f6c5ac889..e78c8994737a 100644 --- a/autogpt_platform/backend/backend/blocks/csv.py +++ b/autogpt_platform/backend/backend/blocks/csv.py @@ -1,21 +1,49 @@ from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema -from backend.data.model import ContributorDetails +from backend.data.model import ContributorDetails, SchemaField class ReadCsvBlock(Block): class Input(BlockSchema): - contents: str - delimiter: str = "," - quotechar: str = '"' - escapechar: str = "\\" - has_header: bool = True - skip_rows: int = 0 - strip: bool = True - skip_columns: list[str] = [] + contents: str = SchemaField( + description="The contents of the CSV file to read", + placeholder="a, b, c\n1,2,3\n4,5,6", + ) + delimiter: str = SchemaField( + description="The delimiter used in the CSV file", + default=",", + ) + quotechar: str = SchemaField( + description="The character used to quote fields", + default='"', + ) + escapechar: str = SchemaField( + description="The character used to escape the delimiter", + default="\\", + ) + has_header: bool = SchemaField( + description="Whether the CSV 
file has a header row", + default=True, + ) + skip_rows: int = SchemaField( + description="The number of rows to skip from the start of the file", + default=0, + ) + strip: bool = SchemaField( + description="Whether to strip whitespace from the values", + default=True, + ) + skip_columns: list[str] = SchemaField( + description="The columns to skip from the start of the row", + default=[], + ) class Output(BlockSchema): - row: dict[str, str] - all_data: list[dict[str, str]] + row: dict[str, str] = SchemaField( + description="The data produced from each row in the CSV file" + ) + all_data: list[dict[str, str]] = SchemaField( + description="All the data in the CSV file as a list of rows" + ) def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/discord.py b/autogpt_platform/backend/backend/blocks/discord.py index cd350a619916..e5414cd32727 100644 --- a/autogpt_platform/backend/backend/blocks/discord.py +++ b/autogpt_platform/backend/backend/blocks/discord.py @@ -2,10 +2,9 @@ import aiohttp import discord -from pydantic import Field from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema -from backend.data.model import BlockSecret, SecretField +from backend.data.model import BlockSecret, SchemaField, SecretField class ReadDiscordMessagesBlock(Block): @@ -13,16 +12,18 @@ class Input(BlockSchema): discord_bot_token: BlockSecret = SecretField( key="discord_bot_token", description="Discord bot token" ) - continuous_read: bool = Field( + continuous_read: bool = SchemaField( description="Whether to continuously read messages", default=True ) class Output(BlockSchema): - message_content: str = Field(description="The content of the message received") - channel_name: str = Field( + message_content: str = SchemaField( + description="The content of the message received" + ) + channel_name: str = SchemaField( description="The name of the channel the message was received from" ) - username: str = Field( + username: str = SchemaField( description="The username of the user who sent the message" ) @@ -134,13 +135,15 @@ class Input(BlockSchema): discord_bot_token: BlockSecret = SecretField( key="discord_bot_token", description="Discord bot token" ) - message_content: str = Field(description="The content of the message received") - channel_name: str = Field( + message_content: str = SchemaField( + description="The content of the message received" + ) + channel_name: str = SchemaField( description="The name of the channel the message was received from" ) class Output(BlockSchema): - status: str = Field( + status: str = SchemaField( description="The status of the operation (e.g., 'Message sent', 'Error')" ) diff --git a/autogpt_platform/backend/backend/blocks/email_block.py b/autogpt_platform/backend/backend/blocks/email_block.py index a7f0f82dcee7..79accb6d7d35 100644 --- a/autogpt_platform/backend/backend/blocks/email_block.py +++ b/autogpt_platform/backend/backend/blocks/email_block.py @@ -2,17 +2,17 @@ from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema from backend.data.model import BlockSecret, SchemaField, SecretField class EmailCredentials(BaseModel): - smtp_server: str = Field( + smtp_server: str = SchemaField( default="smtp.gmail.com", description="SMTP server address" ) - smtp_port: int = Field(default=25, description="SMTP port number") + 
smtp_port: int = SchemaField(default=25, description="SMTP port number") smtp_username: BlockSecret = SecretField(key="smtp_username") smtp_password: BlockSecret = SecretField(key="smtp_password") @@ -30,7 +30,7 @@ class Input(BlockSchema): body: str = SchemaField( description="Body of the email", placeholder="Enter the email body" ) - creds: EmailCredentials = Field( + creds: EmailCredentials = SchemaField( description="SMTP credentials", default=EmailCredentials(), ) diff --git a/autogpt_platform/backend/backend/blocks/http.py b/autogpt_platform/backend/backend/blocks/http.py index 04d893f84701..74a1d3d0bb0b 100644 --- a/autogpt_platform/backend/backend/blocks/http.py +++ b/autogpt_platform/backend/backend/blocks/http.py @@ -4,6 +4,7 @@ import requests from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField class HttpMethod(Enum): @@ -18,15 +19,27 @@ class HttpMethod(Enum): class SendWebRequestBlock(Block): class Input(BlockSchema): - url: str - method: HttpMethod = HttpMethod.POST - headers: dict[str, str] = {} - body: object = {} + url: str = SchemaField( + description="The URL to send the request to", + placeholder="https://api.example.com", + ) + method: HttpMethod = SchemaField( + description="The HTTP method to use for the request", + default=HttpMethod.POST, + ) + headers: dict[str, str] = SchemaField( + description="The headers to include in the request", + default={}, + ) + body: object = SchemaField( + description="The body of the request", + default={}, + ) class Output(BlockSchema): - response: object - client_error: object - server_error: object + response: object = SchemaField(description="The response from the server") + client_error: object = SchemaField(description="The error on 4xx status codes") + server_error: object = SchemaField(description="The error on 5xx status codes") def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/ideogram.py b/autogpt_platform/backend/backend/blocks/ideogram.py index 66dd22061447..6818a25371e2 100644 --- a/autogpt_platform/backend/backend/blocks/ideogram.py +++ b/autogpt_platform/backend/backend/blocks/ideogram.py @@ -75,28 +75,24 @@ class Input(BlockSchema): description="The name of the Image Generation Model, e.g., V_2", default=IdeogramModelName.V2, title="Image Generation Model", - enum=IdeogramModelName, advanced=False, ) aspect_ratio: AspectRatio = SchemaField( description="Aspect ratio for the generated image", default=AspectRatio.ASPECT_1_1, title="Aspect Ratio", - enum=AspectRatio, advanced=False, ) upscale: UpscaleOption = SchemaField( description="Upscale the generated image", default=UpscaleOption.NO_UPSCALE, title="Upscale Image", - enum=UpscaleOption, advanced=False, ) magic_prompt_option: MagicPromptOption = SchemaField( description="Whether to use MagicPrompt for enhancing the request", default=MagicPromptOption.AUTO, title="Magic Prompt Option", - enum=MagicPromptOption, advanced=True, ) seed: Optional[int] = SchemaField( @@ -109,7 +105,6 @@ class Input(BlockSchema): description="Style type to apply, applicable for V_2 and above", default=StyleType.AUTO, title="Style Type", - enum=StyleType, advanced=True, ) negative_prompt: Optional[str] = SchemaField( @@ -122,7 +117,6 @@ class Input(BlockSchema): description="Color palette preset name, choose 'None' to skip", default=ColorPalettePreset.NONE, title="Color Palette Preset", - enum=ColorPalettePreset, advanced=True, ) diff --git 
a/autogpt_platform/backend/backend/blocks/llm.py b/autogpt_platform/backend/backend/blocks/llm.py index a55dbdf10565..daec5b55d392 100644 --- a/autogpt_platform/backend/backend/blocks/llm.py +++ b/autogpt_platform/backend/backend/blocks/llm.py @@ -124,7 +124,10 @@ def cost_factor(self) -> int: class AIStructuredResponseGeneratorBlock(Block): class Input(BlockSchema): - prompt: str + prompt: str = SchemaField( + description="The prompt to send to the language model.", + placeholder="Enter your prompt here...", + ) expected_format: dict[str, str] = SchemaField( description="Expected format of the response. If provided, the response will be validated against this format. " "The keys should be the expected fields in the response, and the values should be the description of the field.", @@ -136,15 +139,25 @@ class Input(BlockSchema): advanced=False, ) api_key: BlockSecret = SecretField(value="") - sys_prompt: str = "" - retry: int = 3 + sys_prompt: str = SchemaField( + title="System Prompt", + default="", + description="The system prompt to provide additional context to the model.", + ) + retry: int = SchemaField( + title="Retry Count", + default=3, + description="Number of times to retry the LLM call if the response does not match the expected format.", + ) prompt_values: dict[str, str] = SchemaField( advanced=False, default={}, description="Values used to fill in the prompt." ) class Output(BlockSchema): - response: dict[str, Any] - error: str + response: dict[str, Any] = SchemaField( + description="The response object generated by the language model." + ) + error: str = SchemaField(description="Error message if the API call failed.") def __init__(self): super().__init__( @@ -349,7 +362,10 @@ def parse_response(resp: str) -> tuple[dict[str, Any], str | None]: class AITextGeneratorBlock(Block): class Input(BlockSchema): - prompt: str + prompt: str = SchemaField( + description="The prompt to send to the language model.", + placeholder="Enter your prompt here...", + ) model: LlmModel = SchemaField( title="LLM Model", default=LlmModel.GPT4_TURBO, @@ -357,15 +373,25 @@ class Input(BlockSchema): advanced=False, ) api_key: BlockSecret = SecretField(value="") - sys_prompt: str = "" - retry: int = 3 + sys_prompt: str = SchemaField( + title="System Prompt", + default="", + description="The system prompt to provide additional context to the model.", + ) + retry: int = SchemaField( + title="Retry Count", + default=3, + description="Number of times to retry the LLM call if the response does not match the expected format.", + ) prompt_values: dict[str, str] = SchemaField( advanced=False, default={}, description="Values used to fill in the prompt." ) class Output(BlockSchema): - response: str - error: str + response: str = SchemaField( + description="The response generated by the language model." 
+ ) + error: str = SchemaField(description="Error message if the API call failed.") def __init__(self): super().__init__( @@ -406,22 +432,43 @@ class SummaryStyle(Enum): class AITextSummarizerBlock(Block): class Input(BlockSchema): - text: str + text: str = SchemaField( + description="The text to summarize.", + placeholder="Enter the text to summarize here...", + ) model: LlmModel = SchemaField( title="LLM Model", default=LlmModel.GPT4_TURBO, description="The language model to use for summarizing the text.", ) - focus: str = "general information" - style: SummaryStyle = SummaryStyle.CONCISE + focus: str = SchemaField( + title="Focus", + default="general information", + description="The topic to focus on in the summary", + ) + style: SummaryStyle = SchemaField( + title="Summary Style", + default=SummaryStyle.CONCISE, + description="The style of the summary to generate.", + ) api_key: BlockSecret = SecretField(value="") # TODO: Make this dynamic - max_tokens: int = 4000 # Adjust based on the model's context window - chunk_overlap: int = 100 # Overlap between chunks to maintain context + max_tokens: int = SchemaField( + title="Max Tokens", + default=4096, + description="The maximum number of tokens to generate in the chat completion.", + ge=1, + ) + chunk_overlap: int = SchemaField( + title="Chunk Overlap", + default=100, + description="The number of overlapping tokens between chunks to maintain context.", + ge=0, + ) class Output(BlockSchema): - summary: str - error: str + summary: str = SchemaField(description="The final summary of the text.") + error: str = SchemaField(description="Error message if the API call failed.") def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/medium.py b/autogpt_platform/backend/backend/blocks/medium.py index 04ebe8fab012..1d85e0978082 100644 --- a/autogpt_platform/backend/backend/blocks/medium.py +++ b/autogpt_platform/backend/backend/blocks/medium.py @@ -1,3 +1,4 @@ +from enum import Enum from typing import List import requests @@ -6,6 +7,12 @@ from backend.data.model import BlockSecret, SchemaField, SecretField +class PublishToMediumStatus(str, Enum): + PUBLIC = "public" + DRAFT = "draft" + UNLISTED = "unlisted" + + class PublishToMediumBlock(Block): class Input(BlockSchema): author_id: BlockSecret = SecretField( @@ -34,9 +41,9 @@ class Input(BlockSchema): description="The original home of this content, if it was originally published elsewhere", placeholder="https://yourblog.com/original-post", ) - publish_status: str = SchemaField( - description="The publish status: 'public', 'draft', or 'unlisted'", - placeholder="public", + publish_status: PublishToMediumStatus = SchemaField( + description="The publish status", + placeholder=PublishToMediumStatus.DRAFT, ) license: str = SchemaField( default="all-rights-reserved", @@ -79,7 +86,7 @@ def __init__(self): "tags": ["test", "automation"], "license": "all-rights-reserved", "notify_followers": False, - "publish_status": "draft", + "publish_status": PublishToMediumStatus.DRAFT.value, "api_key": "your_test_api_key", }, test_output=[ diff --git a/autogpt_platform/backend/backend/blocks/reddit.py b/autogpt_platform/backend/backend/blocks/reddit.py index 065436ae7321..9e4f3f3aca0b 100644 --- a/autogpt_platform/backend/backend/blocks/reddit.py +++ b/autogpt_platform/backend/backend/blocks/reddit.py @@ -2,10 +2,10 @@ from typing import Iterator import praw -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict from backend.data.block import 
Block, BlockCategory, BlockOutput, BlockSchema -from backend.data.model import BlockSecret, SecretField +from backend.data.model import BlockSecret, SchemaField, SecretField from backend.util.mock import MockObject @@ -48,25 +48,25 @@ def get_praw(creds: RedditCredentials) -> praw.Reddit: class GetRedditPostsBlock(Block): class Input(BlockSchema): - subreddit: str = Field(description="Subreddit name") - creds: RedditCredentials = Field( + subreddit: str = SchemaField(description="Subreddit name") + creds: RedditCredentials = SchemaField( description="Reddit credentials", default=RedditCredentials(), ) - last_minutes: int | None = Field( + last_minutes: int | None = SchemaField( description="Post time to stop minutes ago while fetching posts", default=None, ) - last_post: str | None = Field( + last_post: str | None = SchemaField( description="Post ID to stop when reached while fetching posts", default=None, ) - post_limit: int | None = Field( + post_limit: int | None = SchemaField( description="Number of posts to fetch", default=10 ) class Output(BlockSchema): - post: RedditPost = Field(description="Reddit post") + post: RedditPost = SchemaField(description="Reddit post") def __init__(self): super().__init__( @@ -140,13 +140,13 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: class PostRedditCommentBlock(Block): class Input(BlockSchema): - creds: RedditCredentials = Field( + creds: RedditCredentials = SchemaField( description="Reddit credentials", default=RedditCredentials() ) - data: RedditComment = Field(description="Reddit comment") + data: RedditComment = SchemaField(description="Reddit comment") class Output(BlockSchema): - comment_id: str + comment_id: str = SchemaField(description="Posted comment ID") def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/search.py b/autogpt_platform/backend/backend/blocks/search.py index ecd63e2ee658..27a4322ce6a7 100644 --- a/autogpt_platform/backend/backend/blocks/search.py +++ b/autogpt_platform/backend/backend/blocks/search.py @@ -17,11 +17,13 @@ def get_request(cls, url: str, json=False) -> Any: class GetWikipediaSummaryBlock(Block, GetRequest): class Input(BlockSchema): - topic: str + topic: str = SchemaField(description="The topic to fetch the summary for") class Output(BlockSchema): - summary: str - error: str + summary: str = SchemaField(description="The summary of the given topic") + error: str = SchemaField( + description="Error message if the summary cannot be retrieved" + ) def __init__(self): super().__init__( @@ -46,11 +48,13 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: class SearchTheWebBlock(Block, GetRequest): class Input(BlockSchema): - query: str # The search query + query: str = SchemaField(description="The search query to search the web for") class Output(BlockSchema): - results: str # The search results including content from top 5 URLs - error: str # Error message if the search fails + results: str = SchemaField( + description="The search results including content from top 5 URLs" + ) + error: str = SchemaField(description="Error message if the search fails") def __init__(self): super().__init__( @@ -80,7 +84,7 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: class ExtractWebsiteContentBlock(Block, GetRequest): class Input(BlockSchema): - url: str # The URL to scrape + url: str = SchemaField(description="The URL to scrape the content from") raw_content: bool = SchemaField( default=False, title="Raw Content", @@ -89,8 +93,10 @@ class 
Input(BlockSchema): ) class Output(BlockSchema): - content: str # The scraped content from the URL - error: str + content: str = SchemaField(description="The scraped content from the given URL") + error: str = SchemaField( + description="Error message if the content cannot be retrieved" + ) def __init__(self): super().__init__( @@ -116,15 +122,26 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: class GetWeatherInformationBlock(Block, GetRequest): class Input(BlockSchema): - location: str + location: str = SchemaField( + description="Location to get weather information for" + ) api_key: BlockSecret = SecretField(key="openweathermap_api_key") - use_celsius: bool = True + use_celsius: bool = SchemaField( + default=True, + description="Whether to use Celsius or Fahrenheit for temperature", + ) class Output(BlockSchema): - temperature: str - humidity: str - condition: str - error: str + temperature: str = SchemaField( + description="Temperature in the specified location" + ) + humidity: str = SchemaField(description="Humidity in the specified location") + condition: str = SchemaField( + description="Weather condition in the specified location" + ) + error: str = SchemaField( + description="Error message if the weather information cannot be retrieved" + ) def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/talking_head.py b/autogpt_platform/backend/backend/blocks/talking_head.py index e93b69ed8547..f4497d85ffab 100644 --- a/autogpt_platform/backend/backend/blocks/talking_head.py +++ b/autogpt_platform/backend/backend/blocks/talking_head.py @@ -13,7 +13,8 @@ class Input(BlockSchema): key="did_api_key", description="D-ID API Key" ) script_input: str = SchemaField( - description="The text input for the script", default="Welcome to AutoGPT" + description="The text input for the script", + placeholder="Welcome to AutoGPT", ) provider: Literal["microsoft", "elevenlabs", "amazon"] = SchemaField( description="The voice provider to use", default="microsoft" diff --git a/autogpt_platform/backend/backend/blocks/text.py b/autogpt_platform/backend/backend/blocks/text.py index da287b94fa5f..1d8c050fa147 100644 --- a/autogpt_platform/backend/backend/blocks/text.py +++ b/autogpt_platform/backend/backend/blocks/text.py @@ -2,9 +2,9 @@ from typing import Any from jinja2 import BaseLoader, Environment -from pydantic import Field from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField from backend.util import json jinja = Environment(loader=BaseLoader()) @@ -12,15 +12,17 @@ class MatchTextPatternBlock(Block): class Input(BlockSchema): - text: Any = Field(description="Text to match") - match: str = Field(description="Pattern (Regex) to match") - data: Any = Field(description="Data to be forwarded to output") - case_sensitive: bool = Field(description="Case sensitive match", default=True) - dot_all: bool = Field(description="Dot matches all", default=True) + text: Any = SchemaField(description="Text to match") + match: str = SchemaField(description="Pattern (Regex) to match") + data: Any = SchemaField(description="Data to be forwarded to output") + case_sensitive: bool = SchemaField( + description="Case sensitive match", default=True + ) + dot_all: bool = SchemaField(description="Dot matches all", default=True) class Output(BlockSchema): - positive: Any = Field(description="Output data if match is found") - negative: Any = Field(description="Output data if match is not found") + positive: Any = 
SchemaField(description="Output data if match is found") + negative: Any = SchemaField(description="Output data if match is not found") def __init__(self): super().__init__( @@ -64,15 +66,17 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: class ExtractTextInformationBlock(Block): class Input(BlockSchema): - text: Any = Field(description="Text to parse") - pattern: str = Field(description="Pattern (Regex) to parse") - group: int = Field(description="Group number to extract", default=0) - case_sensitive: bool = Field(description="Case sensitive match", default=True) - dot_all: bool = Field(description="Dot matches all", default=True) + text: Any = SchemaField(description="Text to parse") + pattern: str = SchemaField(description="Pattern (Regex) to parse") + group: int = SchemaField(description="Group number to extract", default=0) + case_sensitive: bool = SchemaField( + description="Case sensitive match", default=True + ) + dot_all: bool = SchemaField(description="Dot matches all", default=True) class Output(BlockSchema): - positive: str = Field(description="Extracted text") - negative: str = Field(description="Original text") + positive: str = SchemaField(description="Extracted text") + negative: str = SchemaField(description="Original text") def __init__(self): super().__init__( @@ -116,11 +120,15 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: class FillTextTemplateBlock(Block): class Input(BlockSchema): - values: dict[str, Any] = Field(description="Values (dict) to be used in format") - format: str = Field(description="Template to format the text using `values`") + values: dict[str, Any] = SchemaField( + description="Values (dict) to be used in format" + ) + format: str = SchemaField( + description="Template to format the text using `values`" + ) class Output(BlockSchema): - output: str + output: str = SchemaField(description="Formatted text") def __init__(self): super().__init__( @@ -155,11 +163,13 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: class CombineTextsBlock(Block): class Input(BlockSchema): - input: list[str] = Field(description="text input to combine") - delimiter: str = Field(description="Delimiter to combine texts", default="") + input: list[str] = SchemaField(description="text input to combine") + delimiter: str = SchemaField( + description="Delimiter to combine texts", default="" + ) class Output(BlockSchema): - output: str = Field(description="Combined text") + output: str = SchemaField(description="Combined text") def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/time_blocks.py b/autogpt_platform/backend/backend/blocks/time_blocks.py index 9e95d428b9cc..eb886b5352c8 100644 --- a/autogpt_platform/backend/backend/blocks/time_blocks.py +++ b/autogpt_platform/backend/backend/blocks/time_blocks.py @@ -3,14 +3,22 @@ from typing import Any, Union from backend.data.block import Block, BlockCategory, BlockOutput, BlockSchema +from backend.data.model import SchemaField class GetCurrentTimeBlock(Block): class Input(BlockSchema): - trigger: str + trigger: str = SchemaField( + description="Trigger any data to output the current time" + ) + format: str = SchemaField( + description="Format of the time to output", default="%H:%M:%S" + ) class Output(BlockSchema): - time: str + time: str = SchemaField( + description="Current time in the specified format (default: %H:%M:%S)" + ) def __init__(self): super().__init__( @@ -20,25 +28,38 @@ def __init__(self): input_schema=GetCurrentTimeBlock.Input, 
output_schema=GetCurrentTimeBlock.Output, test_input=[ - {"trigger": "Hello", "format": "{time}"}, + {"trigger": "Hello"}, + {"trigger": "Hello", "format": "%H:%M"}, ], test_output=[ ("time", lambda _: time.strftime("%H:%M:%S")), + ("time", lambda _: time.strftime("%H:%M")), ], ) def run(self, input_data: Input, **kwargs) -> BlockOutput: - current_time = time.strftime("%H:%M:%S") + current_time = time.strftime(input_data.format) yield "time", current_time class GetCurrentDateBlock(Block): class Input(BlockSchema): - trigger: str - offset: Union[int, str] + trigger: str = SchemaField( + description="Trigger any data to output the current date" + ) + offset: Union[int, str] = SchemaField( + title="Days Offset", + description="Offset in days from the current date", + default=0, + ) + format: str = SchemaField( + description="Format of the date to output", default="%Y-%m-%d" + ) class Output(BlockSchema): - date: str + date: str = SchemaField( + description="Current date in the specified format (default: YYYY-MM-DD)" + ) def __init__(self): super().__init__( @@ -48,7 +69,8 @@ def __init__(self): input_schema=GetCurrentDateBlock.Input, output_schema=GetCurrentDateBlock.Output, test_input=[ - {"trigger": "Hello", "format": "{date}", "offset": "7"}, + {"trigger": "Hello", "offset": "7"}, + {"trigger": "Hello", "offset": "7", "format": "%m/%d/%Y"}, ], test_output=[ ( @@ -56,6 +78,12 @@ def __init__(self): lambda t: abs(datetime.now() - datetime.strptime(t, "%Y-%m-%d")) < timedelta(days=8), # 7 days difference + 1 day error margin. ), + ( + "date", + lambda t: abs(datetime.now() - datetime.strptime(t, "%m/%d/%Y")) + < timedelta(days=8), + # 7 days difference + 1 day error margin. + ), ], ) @@ -65,15 +93,23 @@ def run(self, input_data: Input, **kwargs) -> BlockOutput: except ValueError: offset = 0 current_date = datetime.now() - timedelta(days=offset) - yield "date", current_date.strftime("%Y-%m-%d") + yield "date", current_date.strftime(input_data.format) class GetCurrentDateAndTimeBlock(Block): class Input(BlockSchema): - trigger: str + trigger: str = SchemaField( + description="Trigger any data to output the current date and time" + ) + format: str = SchemaField( + description="Format of the date and time to output", + default="%Y-%m-%d %H:%M:%S", + ) class Output(BlockSchema): - date_time: str + date_time: str = SchemaField( + description="Current date and time in the specified format (default: YYYY-MM-DD HH:MM:SS)" + ) def __init__(self): super().__init__( @@ -83,7 +119,7 @@ def __init__(self): input_schema=GetCurrentDateAndTimeBlock.Input, output_schema=GetCurrentDateAndTimeBlock.Output, test_input=[ - {"trigger": "Hello", "format": "{date_time}"}, + {"trigger": "Hello"}, ], test_output=[ ( @@ -97,20 +133,29 @@ def __init__(self): ) def run(self, input_data: Input, **kwargs) -> BlockOutput: - current_date_time = time.strftime("%Y-%m-%d %H:%M:%S") + current_date_time = time.strftime(input_data.format) yield "date_time", current_date_time class CountdownTimerBlock(Block): class Input(BlockSchema): - input_message: Any = "timer finished" - seconds: Union[int, str] = 0 - minutes: Union[int, str] = 0 - hours: Union[int, str] = 0 - days: Union[int, str] = 0 + input_message: Any = SchemaField( + description="Message to output after the timer finishes", + default="timer finished", + ) + seconds: Union[int, str] = SchemaField( + description="Duration in seconds", default=0 + ) + minutes: Union[int, str] = SchemaField( + description="Duration in minutes", default=0 + ) + hours: Union[int, str] = 
SchemaField(description="Duration in hours", default=0) + days: Union[int, str] = SchemaField(description="Duration in days", default=0) class Output(BlockSchema): - output_message: str + output_message: str = SchemaField( + description="Message after the timer finishes" + ) def __init__(self): super().__init__( diff --git a/autogpt_platform/backend/backend/blocks/youtube.py b/autogpt_platform/backend/backend/blocks/youtube.py index cec50109bd4d..b4f0259d98b1 100644 --- a/autogpt_platform/backend/backend/blocks/youtube.py +++ b/autogpt_platform/backend/backend/blocks/youtube.py @@ -7,9 +7,10 @@ from backend.data.model import SchemaField -class TranscribeYouTubeVideoBlock(Block): +class TranscribeYoutubeVideoBlock(Block): class Input(BlockSchema): youtube_url: str = SchemaField( + title="YouTube URL", description="The URL of the YouTube video to transcribe", placeholder="https://www.youtube.com/watch?v=dQw4w9WgXcQ", ) @@ -24,8 +25,8 @@ class Output(BlockSchema): def __init__(self): super().__init__( id="f3a8f7e1-4b1d-4e5f-9f2a-7c3d5a2e6b4c", - input_schema=TranscribeYouTubeVideoBlock.Input, - output_schema=TranscribeYouTubeVideoBlock.Output, + input_schema=TranscribeYoutubeVideoBlock.Input, + output_schema=TranscribeYoutubeVideoBlock.Output, description="Transcribes a YouTube video.", categories={BlockCategory.SOCIAL}, test_input={"youtube_url": "https://www.youtube.com/watch?v=dQw4w9WgXcQ"}, diff --git a/autogpt_platform/backend/test/util/test_service.py b/autogpt_platform/backend/test/util/test_service.py index 458e18b76d3a..e03063fff32d 100644 --- a/autogpt_platform/backend/test/util/test_service.py +++ b/autogpt_platform/backend/test/util/test_service.py @@ -5,7 +5,7 @@ TEST_SERVICE_PORT = 8765 -class TestService(AppService): +class ServiceTest(AppService): def __init__(self): super().__init__(port=TEST_SERVICE_PORT) @@ -27,8 +27,8 @@ async def add_async(a: int, b: int) -> int: @pytest.mark.asyncio(scope="session") async def test_service_creation(server): - with TestService(): - client = get_service_client(TestService, TEST_SERVICE_PORT) + with ServiceTest(): + client = get_service_client(ServiceTest, TEST_SERVICE_PORT) assert client.add(5, 3) == 8 assert client.subtract(10, 4) == 6 assert client.fun_with_async(5, 3) == 8 From 6f3828fc994a8441ac4a860fb050def2da18042e Mon Sep 17 00:00:00 2001 From: Aarushi <50577581+aarushik93@users.noreply.github.com> Date: Fri, 18 Oct 2024 13:49:56 +0100 Subject: [PATCH 5/6] fix(dockercompose): Fix db manager connection (#8377) * add db host * remove unused variable --- autogpt_platform/backend/backend/util/settings.py | 5 ----- autogpt_platform/docker-compose.platform.yml | 1 + 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/autogpt_platform/backend/backend/util/settings.py b/autogpt_platform/backend/backend/util/settings.py index 6e552fadb83d..1ac875fdf92a 100644 --- a/autogpt_platform/backend/backend/util/settings.py +++ b/autogpt_platform/backend/backend/util/settings.py @@ -117,11 +117,6 @@ class Config(UpdateTrackingModel["Config"], BaseSettings): description="The port for agent server daemon to run on", ) - database_api_host: str = Field( - default="0.0.0.0", - description="The host for database server API to run on", - ) - database_api_port: int = Field( default=8005, description="The port for database server API to run on", diff --git a/autogpt_platform/docker-compose.platform.yml b/autogpt_platform/docker-compose.platform.yml index a0b8f670acd6..020f32201fdc 100644 --- a/autogpt_platform/docker-compose.platform.yml +++ 
b/autogpt_platform/docker-compose.platform.yml @@ -103,6 +103,7 @@ services: - ENABLE_AUTH=true - PYRO_HOST=0.0.0.0 - AGENTSERVER_HOST=rest_server + - DATABASEMANAGER_HOST=0.0.0.0 ports: - "8002:8000" networks:
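
Note on the cost entries introduced in PATCH 1/6: the credit.py hunk registers entries such as BlockCost(cost_amount=1, cost_filter={"raw_content": False}). A rough mental model, assuming the filter behaves as a subset match against the block's input data (this matching rule, and the matches/charge_for helpers below, are illustrative assumptions rather than the actual implementation in backend/data/credit.py):

# Illustrative sketch only: assumes a cost_filter is matched as a subset of the block input.
# BlockCost is simplified here; matches() and charge_for() are hypothetical helpers.
from dataclasses import dataclass, field
from typing import Any


@dataclass
class BlockCost:
    cost_amount: int
    cost_filter: dict[str, Any] = field(default_factory=dict)


def matches(cost: BlockCost, input_data: dict[str, Any]) -> bool:
    # A cost entry applies when every filtered key is present with the expected value.
    return all(input_data.get(key) == value for key, value in cost.cost_filter.items())


def charge_for(costs: list[BlockCost], input_data: dict[str, Any]) -> int:
    # Charge the first matching entry; charge nothing when no entry matches.
    return next((c.cost_amount for c in costs if matches(c, input_data)), 0)


# ExtractWebsiteContentBlock in PATCH 1/6 is only charged when raw_content is False.
extract_costs = [BlockCost(cost_amount=1, cost_filter={"raw_content": False})]
assert charge_for(extract_costs, {"url": "https://example.com", "raw_content": False}) == 1
assert charge_for(extract_costs, {"url": "https://example.com", "raw_content": True}) == 0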