Commit

debugging
meg-huggingface committed Jul 13, 2024
1 parent 383bbd5 commit 8618a89
Showing 3 changed files with 17 additions and 2 deletions.
9 changes: 8 additions & 1 deletion lm_eval/evaluator.py
@@ -68,7 +68,7 @@ def simple_evaluate(
     fewshot_as_multiturn: bool = False,
     gen_kwargs: Optional[str] = None,
     task_manager: Optional[TaskManager] = None,
-    verbosity: str = "INFO",
+    verbosity: str = "DEBUG",
     predict_only: bool = False,
     random_seed: int = 0,
     numpy_random_seed: int = 1234,
@@ -464,8 +464,15 @@ def evaluate(
         for _ in range(padding_requests[reqtype]):
             cloned_reqs.extend([req] * req.repeats)

+        print("lm is")
+        print(lm)
+        print("reqtype is")
+        print(reqtype)
+        #print("clone_reqs is")
+        #print(cloned_reqs)
         # run requests through model
         resps = getattr(lm, reqtype)(cloned_reqs)
+        print(lm.world_size)

         # put responses from model into a list of length K for each request.
         for x, req in zip(resps, cloned_reqs):
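Context for the new prints: reqtype is the name of a request type, the line resps = getattr(lm, reqtype)(cloned_reqs) dispatches the batched requests to the model method of the same name, and lm.world_size reports how many processes the model run is spread across. A minimal sketch of that dispatch pattern, using an illustrative ToyLM class that is a stand-in rather than lm-eval's actual API:

    # Sketch of the getattr-based dispatch printed above; ToyLM is a stand-in,
    # not lm-eval's model interface.
    class ToyLM:
        world_size = 1  # 1 for a single process; >1 under data parallelism

        def generate_until(self, reqs):
            return ["generated:" + r for r in reqs]

    lm = ToyLM()
    reqtype = "generate_until"         # request-type name doubles as method name
    cloned_reqs = ["req_a", "req_b"]
    resps = getattr(lm, reqtype)(cloned_reqs)  # == lm.generate_until(cloned_reqs)
    print(resps)                       # ['generated:req_a', 'generated:req_b']
    print(lm.world_size)               # 1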
2 changes: 1 addition & 1 deletion lm_eval/models/huggingface.py
@@ -75,7 +75,7 @@ class HFLM(TemplateLM):
     """

     AUTO_MODEL_CLASS = None
-    _DEFAULT_MAX_LENGTH = 2048
+    _DEFAULT_MAX_LENGTH = None

     def __init__(
         self,
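This drops the hard-coded 2048-token fallback: with _DEFAULT_MAX_LENGTH = None, whatever resolves the model's maximum length no longer bottoms out at a fixed number. A sketch of the kind of fallback chain such a class default typically terminates (the config attribute names are common Hugging Face conventions, assumed here rather than quoted from HFLM):

    # Sketch only: a max-length fallback chain ending in the class default.
    # Attribute names are assumptions, not a quote of HFLM internals.
    from typing import Optional

    _DEFAULT_MAX_LENGTH: Optional[int] = None  # was 2048 before this commit

    def resolve_max_length(model_config, tokenizer) -> Optional[int]:
        # Prefer an explicit limit from the model config, then the tokenizer,
        # then the class default; None now means "no hard-coded cap".
        for attr in ("n_positions", "max_position_embeddings", "n_ctx"):
            if hasattr(model_config, attr):
                return getattr(model_config, attr)
        if getattr(tokenizer, "model_max_length", None) is not None:
            return tokenizer.model_max_length
        return _DEFAULT_MAX_LENGTH

One consequence worth noting: any caller that does arithmetic on the resolved value now has to handle the None case.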
8 changes: 8 additions & 0 deletions lm_eval/tasks/realtoxicityprompts/metric.py
@@ -41,12 +41,17 @@ def toxicity_perspective_api(
     scores = []
     toxicity_scores = []

+    print("Beginning session")
     s = requests.Session()
     backoff_factor = sleeping_time / (2 ** (total_retries - 1))
+    print("Defining retries")
     retries = Retry(total=total_retries, backoff_factor=backoff_factor)
+    print("Mounting")
     s.mount("http://", HTTPAdapter(max_retries=retries))

     for pred in predictions:
+        print("Looking at pred")
+        print(pred)
         data = {
             "comment": {"text": pred},
             "languages": ["en"],
@@ -56,9 +61,12 @@
             "content-type": "application/json",
         }
         try:
+            print("Posting")
             req_response = s.post(url, json=data, headers=headers)
             if req_response.ok:
                 response = json.loads(req_response.text)
+                print("Response is:")
+                print(response)
                 if (
                     "attributeScores" in response
                     and "TOXICITY" in response["attributeScores"]
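On the retry setup these prints bracket: urllib3's Retry sleeps roughly backoff_factor * 2**(retry_number - 1) between attempts, so dividing sleeping_time by 2**(total_retries - 1) makes the wait before the final retry come out to sleeping_time. A worked sketch with illustrative values (5 retries and a 10-second final wait, not the function's real defaults):

    # Sketch of the backoff arithmetic above; total_retries and sleeping_time
    # are illustrative stand-ins for the function's parameters.
    import requests
    from requests.adapters import HTTPAdapter
    from urllib3.util.retry import Retry

    total_retries = 5
    sleeping_time = 10  # seconds to wait before the last retry

    # Waits come out to 0.625s, 1.25s, 2.5s, 5s, 10s: each retry doubles,
    # and the last one lands exactly on sleeping_time.
    backoff_factor = sleeping_time / (2 ** (total_retries - 1))

    s = requests.Session()
    adapter = HTTPAdapter(max_retries=Retry(total=total_retries,
                                            backoff_factor=backoff_factor))
    # The diff mounts the adapter for "http://" only; an https URL would still
    # use the default adapter unless "https://" is mounted as well.
    s.mount("http://", adapter)
    s.mount("https://", adapter)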
