From 6236c6cf218a9205caac3ff09a1ed9b97514d5ee Mon Sep 17 00:00:00 2001 From: Robert Haase Date: Sun, 16 Feb 2025 10:47:57 +0100 Subject: [PATCH 1/5] recommend using model provider prefixes before model name --- README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 4dd80d7..d62be13 100644 --- a/README.md +++ b/README.md @@ -37,13 +37,13 @@ There is a detailed [tutorial](https://github.com/haesleinhuepf/git-bob/blob/mai * Make sure to replace `pip install -e .` with a specific git-bob version such as `pip install git-bob==0.16.0`. * If your project does not contain a `requirements.txt` file, remove the line `pip install -r requirements.txt`. * Configure the LLM you want to use in the workflow files by specifying the `GIT_BOB_LLM_NAME` environment variable. These were tested: - * `claude-3-5-sonnet-20241022` - * `gpt-4o-2024-08-06` + * `anthropic:claude-3-5-sonnet-20241022` + * `openai:gpt-4o-2024-08-06` * `github_models:gpt-4o` * `github_models:meta-llama-3.1-405b-instruct` - * `gemini-1.5-pro-002` - * `mistral-large-2411` (uses `pixtral-12b-2409` for vision tasks) - * `deepseek-chat` + * `google:gemini-1.5-pro-002` + * `mistral:mistral-large-2411` (uses `pixtral-12b-2409` for vision tasks) + * `deepseek:deepseek-chat` * configure a GitHub secret with the corresponding key from the LLM provider depending on the above configured LLM: * `OPENAI_API_KEY`: [OpenAI (gpt)](https://openai.com/blog/openai-api) * `ANTHROPIC_API_KEY`: [Anthropic (claude)](https://www.anthropic.com/api) From 1e4963f45f6d19bf85e88f03c0919c09b8fc734d Mon Sep 17 00:00:00 2001 From: Robert Haase Date: Sun, 16 Feb 2025 10:48:31 +0100 Subject: [PATCH 2/5] add provider names as prompt handlers --- setup.cfg | 3 +++ 1 file changed, 3 insertions(+) diff --git a/setup.cfg b/setup.cfg index 6558a50..1bc06c6 100644 --- a/setup.cfg +++ b/setup.cfg @@ -54,10 +54,13 @@ console_scripts = git-bob-remote = git_bob._terminal:remote_interface 
git_bob.prompt_handlers = + openai = git_bob._endpoints:prompt_openai gpt-4o = git_bob._endpoints:prompt_openai gpt4o = git_bob._endpoints:prompt_openai o1 = git_bob._endpoints:prompt_openai + anthropic = git_bob._endpoints:prompt_anthropic claude = git_bob._endpoints:prompt_anthropic + google = git_bob._endpoints:prompt_googleai gemini = git_bob._endpoints:prompt_googleai mistral = git_bob._endpoints:prompt_mistral pixtral = git_bob._endpoints:prompt_mistral From 7e62fd629133e5792e2a9dab5ee2299d1809bd74 Mon Sep 17 00:00:00 2001 From: Robert Haase Date: Sun, 16 Feb 2025 10:58:44 +0100 Subject: [PATCH 3/5] support selecting model by provider prefix --- src/git_bob/_terminal.py | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/src/git_bob/_terminal.py b/src/git_bob/_terminal.py index b4458a9..9617300 100644 --- a/src/git_bob/_terminal.py +++ b/src/git_bob/_terminal.py @@ -48,7 +48,7 @@ def command_line_interface(): if value is not None: try: signature = inspect.signature(value) - model_aliases[key] = signature.parameters['model'].default + model_aliases[key] = key + ":" + signature.parameters['model'].default except: continue print("model aliases:\n", model_aliases) @@ -106,7 +106,7 @@ def handler(signum, frame): print("text: ", text) print(f"{agent_name} ask in text", f"{agent_name} ask" in text) - # handle ask-llm task option + # handle ask-llm task option (using model names or aliases to select the LLM) if f"{agent_name} ask" in text: # example: # git-bob ask gpt-4o to solve this issue -> git-bob solve this issue @@ -125,10 +125,19 @@ def handler(signum, frame): prompt_function = None prompt_handlers = init_prompt_handlers() # reinitialize, because configured LLM may have changed - for key, value in prompt_handlers.items(): - if key in Config.llm_name: - prompt_function = partial(value, model=Config.llm_name) - break + + # search for the leading model provider (left of : ) + if ":" in Config.llm_name: + provider = 
Config.llm_name.split(":")[0] + for key, value in prompt_handlers.items(): + if key == provider: + prompt_function = partial(value, model=Config.llm_name) + break + else: + for key, value in prompt_handlers.items(): + if key in Config.llm_name: + prompt_function = partial(value, model=Config.llm_name) + break if prompt_function is None: llm_name = Config.llm_name[1:] From 763e8b7f1fb74b2c04a68052070e95b20ba7d645 Mon Sep 17 00:00:00 2001 From: Robert Haase Date: Sun, 16 Feb 2025 10:59:38 +0100 Subject: [PATCH 4/5] bump version --- src/git_bob/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/git_bob/__init__.py b/src/git_bob/__init__.py index 5c38579..efd7214 100644 --- a/src/git_bob/__init__.py +++ b/src/git_bob/__init__.py @@ -1,4 +1,4 @@ -__version__ = "0.18.1" +__version__ = "0.19.0" __all__ = ( From 314ba53d37f49db2e0e44e81aa49a7a6ad86d70b Mon Sep 17 00:00:00 2001 From: Robert Haase Date: Sun, 16 Feb 2025 11:01:30 +0100 Subject: [PATCH 5/5] debug tracing --- src/git_bob/_terminal.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/git_bob/_terminal.py b/src/git_bob/_terminal.py index 9617300..0962495 100644 --- a/src/git_bob/_terminal.py +++ b/src/git_bob/_terminal.py @@ -131,11 +131,13 @@ def handler(signum, frame): provider = Config.llm_name.split(":")[0] for key, value in prompt_handlers.items(): if key == provider: + Log().log(f"Selecting prompt handler by provider name ({provider}): " + value.__name__) prompt_function = partial(value, model=Config.llm_name) break else: for key, value in prompt_handlers.items(): if key in Config.llm_name: + Log().log("Selecting prompt handler by llm_name: " + value.__name__) prompt_function = partial(value, model=Config.llm_name) break