Skip to content

Commit

Permalink
fix: resort to eval if ast eval does not work
Browse files Browse the repository at this point in the history
  • Loading branch information
shreyashankar committed Oct 8, 2024
1 parent d0975c8 commit fab6641
Show file tree
Hide file tree
Showing 3 changed files with 52 additions and 2 deletions.
6 changes: 5 additions & 1 deletion docetl/operations/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -359,7 +359,11 @@ def safe_eval(expression: str, output: Dict) -> bool:
# Safely evaluate the expression
return bool(aeval(expression))
except Exception:
return False
# try to evaluate with python eval
try:
return bool(eval(expression, locals={"output": output}))
except Exception:
return False


class APIWrapper(object):
Expand Down
2 changes: 1 addition & 1 deletion docs/concepts/operators.md
Original file line number Diff line number Diff line change
Expand Up @@ -135,7 +135,7 @@ validate:
- all(len(insight["supporting_actions"]) >= 1 for insight in output["insights"])
```

Access variables using dictionary syntax: `input["field"]` or `output["field"]`.
Access variables using dictionary syntax: `output["field"]`. Note that validation statements cannot access the `input` document directly; however, for non-reduce operations the output document carries all of the input document's fields (they pass through unchanged), so those values remain available via `output`.

The `num_retries_on_validate_failure` attribute specifies how many times to retry the LLM if any validation statements fail.

Expand Down
46 changes: 46 additions & 0 deletions tests/test_validation.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
import pytest
from docetl.operations.map import MapOperation
from tests.conftest import api_wrapper, default_model, max_threads


@pytest.fixture
def map_config_with_validation():
    """Map-operation config that classifies sentiment and validates the LLM output.

    The `validate` entries are Python expressions evaluated against each output
    doc; failures trigger up to `num_retries_on_validate_failure` retries.
    """
    output_schema = {"schema": {"sentiment": "string", "confidence": "float"}}
    validation_rules = [
        "output['sentiment'] in ['positive', 'negative', 'neutral']",
        "0 <= output['confidence'] <= 1",
    ]
    return {
        "name": "sentiment_analysis_with_validation",
        "type": "map",
        "prompt": "Analyze the sentiment of the following text: '{{ input.text }}'. Classify it as either positive, negative, or neutral.",
        "output": output_schema,
        "model": "gpt-4o-mini",
        "validate": validation_rules,
        "num_retries_on_validate_failure": 2,
    }


@pytest.fixture
def sample_data():
    """Three short documents spanning positive, negative, and neutral sentiment."""
    texts = [
        "I love this product! It's amazing.",
        "This is the worst experience ever.",
        "The weather is okay today.",
    ]
    return [{"text": text} for text in texts]


def test_map_operation_with_validation(
    map_config_with_validation, sample_data, api_wrapper, default_model, max_threads
):
    """End-to-end check that a map op with `validate` rules yields valid outputs.

    NOTE(review): this makes real LLM calls via api_wrapper — the `cost > 0`
    assertion presumably depends on a non-cached, billable model; confirm in CI.
    """
    operation = MapOperation(
        api_wrapper, map_config_with_validation, default_model, max_threads
    )
    results, cost = operation.execute(sample_data)

    # One output doc per input doc, and the run should have incurred some cost.
    assert len(results) == len(sample_data)
    assert cost > 0

    allowed_sentiments = {"positive", "negative", "neutral"}
    for result in results:
        # Both schema fields must be present and satisfy the validation rules.
        assert "sentiment" in result
        assert "confidence" in result
        assert result["sentiment"] in allowed_sentiments
        assert 0 <= result["confidence"] <= 1

0 comments on commit fab6641

Please sign in to comment.