add bedrock instructions

lint

wongjingping committed Aug 30, 2024
1 parent 2765925 commit aac73ee

Showing 3 changed files with 23 additions and 8 deletions.
README.md (15 additions, 0 deletions)

@@ -379,6 +379,21 @@ python -W ignore main.py \
-n 10
```

### Bedrock

```bash
python3 main.py \
-db postgres \
-q data/instruct_basic_postgres.csv data/instruct_advanced_postgres.csv data/questions_gen_postgres.csv \
-o results/bedrock_llama_70b_basic.csv results/bedrock_llama_70b_advanced.csv results/bedrock_llama_70b_v1.csv \
-g bedrock \
-f prompts/prompt_cot_postgres.md \
--cot_table_alias prealias \
-m meta.llama3-70b-instruct-v1:0 \
-c 0 \
-p 10
```
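
Note: invoking a model through Bedrock presumably requires AWS credentials with access to that model already configured in your environment; the prerequisites are not spelled out in this commit. A typical (hypothetical) setup might look like:

```bash
# Hypothetical prerequisites; not specified in this commit.
pip install boto3
export AWS_ACCESS_KEY_ID=<your_access_key>
export AWS_SECRET_ACCESS_KEY=<your_secret_key>
export AWS_DEFAULT_REGION=us-east-1  # a region where the model is enabled
```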

### Together

Before running this, you must create an account with [Together.ai](https://together.ai/) and obtain an API key. Then store the key and install the `together` package as shown below:
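
```bash
# Setup steps from the paragraph above.
export TOGETHER_API_KEY=<your_api_key>
pip install together
```

You can then run the following command: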

eval/together_runner.py (7 additions, 8 deletions)

@@ -13,15 +13,17 @@
 from utils.reporting import upload_results


-client = Together(api_key=os.environ.get('TOGETHER_API_KEY'))
+client = Together(api_key=os.environ.get("TOGETHER_API_KEY"))


 def process_row(row: Dict, model: str):
     start_time = time()
     if model.startswith("meta-llama"):
-        stop = ["<|eot_id|>","<|eom_id|>"]
+        stop = ["<|eot_id|>", "<|eom_id|>"]
     else:
-        print("Undefined stop token(s). Please specify the stop token(s) for the model.")
+        print(
+            "Undefined stop token(s). Please specify the stop token(s) for the model."
+        )
         stop = []
     messages = row["prompt"]
     response = client.chat.completions.create(
@@ -30,13 +32,12 @@ def process_row(row: Dict, model: str):
         max_tokens=800,
         temperature=0.0,
         stop=stop,
-        stream=False
+        stream=False,
     )
     content = response.choices[0].message.content
     generated_query = content.split("```", 1)[0].strip()
     end_time = time()

-
     row["generated_query"] = generated_query
     row["latency_seconds"] = end_time - start_time
     row["tokens_used"] = None
@@ -132,9 +133,7 @@ def run_together_eval(args):
         with ThreadPoolExecutor(max_workers=max_workers) as executor:
             futures = []
             for row in df.to_dict("records"):
-                futures.append(
-                    executor.submit(process_row, row, model)
-                )
+                futures.append(executor.submit(process_row, row, model))

             with tqdm(as_completed(futures), total=len(futures)) as pbar:
                 for f in pbar:
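
The `meta-llama` branch above hard-codes the Llama 3 end-of-turn markers, and any other model family currently falls through to an empty stop list. A minimal sketch of how the branching could be generalized (hypothetical; not part of this commit) is a prefix-to-stop-tokens mapping:

```python
# Hypothetical refactor, not part of this commit: map model-name
# prefixes to their stop tokens instead of branching per family.
STOP_TOKENS = {
    "meta-llama": ["<|eot_id|>", "<|eom_id|>"],
}


def get_stop_tokens(model: str) -> list:
    for prefix, stops in STOP_TOKENS.items():
        if model.startswith(prefix):
            return stops
    print("Undefined stop token(s). Please specify the stop token(s) for the model.")
    return []
```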

utils/gen_prompt.py (1 addition, 0 deletions)

@@ -131,6 +131,7 @@ def generate_prompt(
     Else, we will treat the file as a string template.
     """
     from defog_data.metadata import dbs  # to avoid CI error
+
     is_json = prompt_file.endswith(".json")
     if is_json:
         with open(prompt_file, "r") as f:
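
Per the docstring above, `generate_prompt` parses a `.json` prompt file into structured messages and otherwise treats the file contents as a plain string template. The function body is truncated in this diff, but a minimal sketch of that pattern (with a hypothetical `load_prompt` helper, not the repo's actual code) might look like:

```python
import json


def load_prompt(prompt_file: str, **fmt_kwargs):
    # Hypothetical sketch of the JSON-vs-template branching described
    # in the docstring; the real generate_prompt() body is truncated above.
    if prompt_file.endswith(".json"):
        with open(prompt_file, "r") as f:
            # JSON prompt files are parsed into structured chat messages.
            return json.load(f)
    with open(prompt_file, "r") as f:
        # Any other file is read as a plain string template and formatted.
        return f.read().format(**fmt_kwargs)
```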
