diff --git a/README.md b/README.md index 4dd80d7..d62be13 100644 --- a/README.md +++ b/README.md @@ -37,13 +37,13 @@ There is a detailed [tutorial](https://github.com/haesleinhuepf/git-bob/blob/mai * Make sure to replace `pip install -e .` with a specific git-bob version such as `pip install git-bob==0.16.0`. * If your project does not contain a `requirements.txt` file, remove the line `pip install -r requirements.txt`. * Configure the LLM you want to use in the workflow files by specifying the `GIT_BOB_LLM_NAME` environment variable. These were tested: - * `claude-3-5-sonnet-20241022` - * `gpt-4o-2024-08-06` + * `anthropic:claude-3-5-sonnet-20241022` + * `openai:gpt-4o-2024-08-06` * `github_models:gpt-4o` * `github_models:meta-llama-3.1-405b-instruct` - * `gemini-1.5-pro-002` - * `mistral-large-2411` (uses `pixtral-12b-2409` for vision tasks) - * `deepseek-chat` + * `google:gemini-1.5-pro-002` + * `mistral:mistral-large-2411` (uses `pixtral-12b-2409` for vision tasks) + * `deepseek:deepseek-chat` * configure a GitHub secret with the corresponding key from the LLM provider depending on the above configured LLM: * `OPENAI_API_KEY`: [OpenAI (gpt)](https://openai.com/blog/openai-api) * `ANTHROPIC_API_KEY`: [Anthropic (claude)](https://www.anthropic.com/api) diff --git a/setup.cfg b/setup.cfg index 6558a50..1bc06c6 100644 --- a/setup.cfg +++ b/setup.cfg @@ -54,10 +54,13 @@ console_scripts = git-bob-remote = git_bob._terminal:remote_interface git_bob.prompt_handlers = + openai = git_bob._endpoints:prompt_openai gpt-4o = git_bob._endpoints:prompt_openai gpt4o = git_bob._endpoints:prompt_openai o1 = git_bob._endpoints:prompt_openai + anthropic = git_bob._endpoints:prompt_anthropic claude = git_bob._endpoints:prompt_anthropic + google = git_bob._endpoints:prompt_googleai gemini = git_bob._endpoints:prompt_googleai mistral = git_bob._endpoints:prompt_mistral pixtral = git_bob._endpoints:prompt_mistral diff --git a/src/git_bob/__init__.py b/src/git_bob/__init__.py index 
5c38579..efd7214 100644 --- a/src/git_bob/__init__.py +++ b/src/git_bob/__init__.py @@ -1,4 +1,4 @@ -__version__ = "0.18.1" +__version__ = "0.19.0" __all__ = ( diff --git a/src/git_bob/_terminal.py b/src/git_bob/_terminal.py index b4458a9..0962495 100644 --- a/src/git_bob/_terminal.py +++ b/src/git_bob/_terminal.py @@ -48,7 +48,7 @@ def command_line_interface(): if value is not None: try: signature = inspect.signature(value) - model_aliases[key] = signature.parameters['model'].default + model_aliases[key] = key + ":" + signature.parameters['model'].default except: continue print("model aliases:\n", model_aliases) @@ -106,7 +106,7 @@ def handler(signum, frame): print("text: ", text) print(f"{agent_name} ask in text", f"{agent_name} ask" in text) - # handle ask-llm task option + # handle ask-llm task option (using model names or aliases to select the LLM) if f"{agent_name} ask" in text: # example: # git-bob ask gpt-4o to solve this issue -> git-bob solve this issue @@ -125,10 +125,21 @@ def handler(signum, frame): prompt_function = None prompt_handlers = init_prompt_handlers() # reinitialize, because configured LLM may have changed - for key, value in prompt_handlers.items(): - if key in Config.llm_name: - prompt_function = partial(value, model=Config.llm_name) - break + + # search for the leading model provider (left of : ) + if ":" in Config.llm_name: + provider = Config.llm_name.split(":")[0] + for key, value in prompt_handlers.items(): + if key == provider: + Log().log(f"Selecting prompt handler by provider name ({provider}): " + value.__name__) + prompt_function = partial(value, model=Config.llm_name) + break + else: + for key, value in prompt_handlers.items(): + if key in Config.llm_name: + Log().log("Selecting prompt handler by llm_name: " + value.__name__) + prompt_function = partial(value, model=Config.llm_name) + break if prompt_function is None: llm_name = Config.llm_name[1:]