infra: drop py3.8 CI #1188

Merged · 6 commits · Nov 28, 2024
Changes from 2 commits
2 changes: 1 addition & 1 deletion .github/workflows/python_test.yml
@@ -1,4 +1,4 @@
name: Python CI

Check notice on line 1 in .github/workflows/python_test.yml (GitHub Actions / benchmark)

Benchmark results

create_5_000_run_trees: Mean +- std dev: 613 ms +- 45 ms
create_10_000_run_trees: Mean +- std dev: 1.19 sec +- 0.06 sec
create_20_000_run_trees: Mean +- std dev: 1.19 sec +- 0.05 sec
dumps_class_nested_py_branch_and_leaf_200x400: Mean +- std dev: 707 us +- 22 us
dumps_class_nested_py_leaf_50x100: Mean +- std dev: 25.0 ms +- 0.4 ms
dumps_class_nested_py_leaf_100x200: Mean +- std dev: 103 ms +- 2 ms
dumps_dataclass_nested_50x100: Mean +- std dev: 25.2 ms +- 0.2 ms
dumps_pydantic_nested_50x100: Mean +- std dev: 65.3 ms +- 15.6 ms
  WARNING: the benchmark result may be unstable; the standard deviation (15.6 ms) is 24% of the mean (65.3 ms).
dumps_pydanticv1_nested_50x100: Mean +- std dev: 216 ms +- 30 ms
  WARNING: the benchmark result may be unstable; the standard deviation (29.7 ms) is 14% of the mean (216 ms).

For unstable results, pyperf suggests rerunning with more runs, values and/or loops, running 'python -m pyperf system tune' to reduce system jitter, using pyperf stats, pyperf dump, and pyperf hist to analyze results, and passing --quiet to hide these warnings.

Check notice on line 1 in .github/workflows/python_test.yml (GitHub Actions / benchmark)

Comparison against main

+-----------------------------------+----------+------------------------+
| Benchmark                         | main     | changes                |
+===================================+==========+========================+
| dumps_pydantic_nested_50x100      | 71.7 ms  | 65.3 ms: 1.10x faster  |
+-----------------------------------+----------+------------------------+
| dumps_pydanticv1_nested_50x100    | 228 ms   | 216 ms: 1.06x faster   |
+-----------------------------------+----------+------------------------+
| dumps_dataclass_nested_50x100     | 25.9 ms  | 25.2 ms: 1.03x faster  |
+-----------------------------------+----------+------------------------+
| dumps_class_nested_py_leaf_50x100 | 25.5 ms  | 25.0 ms: 1.02x faster  |
+-----------------------------------+----------+------------------------+
| create_20_000_run_trees           | 1.21 sec | 1.19 sec: 1.02x faster |
+-----------------------------------+----------+------------------------+
| Geometric mean                    | (ref)    | 1.03x faster           |
+-----------------------------------+----------+------------------------+

Benchmark hidden because not significant (4): create_5_000_run_trees, create_10_000_run_trees, dumps_class_nested_py_leaf_100x200, dumps_class_nested_py_branch_and_leaf_200x400

on:
push:
@@ -19,11 +19,11 @@
    strategy:
      matrix:
        python-version:
-         - "3.8"
          - "3.9"
          - "3.10"
          - "3.11"
          - "3.12"
+         - "3.13"
    defaults:
      run:
        working-directory: python
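The benchmark annotations above appear to come from pyperf-based suites (the warnings reference pyperf commands such as 'python -m pyperf system tune'), which report each benchmark as "Mean +- std dev". For orientation only, a minimal sketch of how such a benchmark is typically defined with pyperf; the benchmark name and workload below are hypothetical and not taken from this repository:

```python
import pyperf


def build_nested_payload() -> dict:
    # Hypothetical workload standing in for the dumps_* / create_* benchmarks.
    return {"level_1": {"level_2": list(range(1_000))}}


if __name__ == "__main__":
    runner = pyperf.Runner()
    # bench_func calls the workload repeatedly and reports "Mean +- std dev",
    # the same format shown in the CI annotation above.
    runner.bench_func("build_nested_payload", build_nested_payload)
```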
19 changes: 0 additions & 19 deletions python/langsmith/_internal/_aiter.py
@@ -334,25 +334,6 @@ def accepts_context(callable: Callable[..., Any]) -> bool:
return False


# Ported from Python 3.9+ to support Python 3.8
async def aio_to_thread(
func, /, *args, __ctx: Optional[contextvars.Context] = None, **kwargs
):
"""Asynchronously run function *func* in a separate thread.

Any *args and **kwargs supplied for this function are directly passed
to *func*. Also, the current :class:`contextvars.Context` is propagated,
allowing context variables from the main thread to be accessed in the
separate thread.

Return a coroutine that can be awaited to get the eventual result of *func*.
"""
loop = asyncio.get_running_loop()
ctx = __ctx or contextvars.copy_context()
func_call = functools.partial(ctx.run, func, *args, **kwargs)
return await loop.run_in_executor(None, func_call)


@functools.lru_cache(maxsize=1)
def asyncio_accepts_context():
"""Check if the current asyncio event loop accepts a context argument."""
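The helper removed above was, per its own comment, a port of the Python 3.9+ asyncio.to_thread implementation: copy the current contextvars.Context, bind it with functools.partial, and hand it to the default executor. With 3.8 support dropped, callers use the standard-library version directly. A minimal sketch of that pattern, using a hypothetical blocking helper rather than the SDK's real internals:

```python
import asyncio
import time


def blocking_work(n: int) -> int:
    # Hypothetical stand-in for synchronous helpers such as _setup_run or
    # _load_experiment that the SDK offloads to a worker thread.
    time.sleep(0.1)
    return n * 2


async def main() -> None:
    # asyncio.to_thread awaits the function in the default ThreadPoolExecutor
    # and propagates the caller's context variables, which is what the removed
    # aio_to_thread helper re-implemented for Python 3.8.
    result = await asyncio.to_thread(blocking_work, 21)
    print(result)  # 42


asyncio.run(main())
```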
12 changes: 6 additions & 6 deletions python/langsmith/evaluation/_arunner.py
@@ -344,13 +344,13 @@ async def aevaluate_existing(
project = (
experiment
if isinstance(experiment, schemas.TracerSession)
else (await aitertools.aio_to_thread(_load_experiment, experiment, client))
else (await asyncio.to_thread(_load_experiment, experiment, client))
)
runs = await aitertools.aio_to_thread(
runs = await asyncio.to_thread(
_load_traces, experiment, client, load_nested=load_nested
)
data_map = await aitertools.aio_to_thread(_load_examples_map, client, project)
data = [data_map[run.reference_example_id] for run in runs]
data_map = await asyncio.to_thread(_load_examples_map, client, project)
data = [data_map[cast(uuid.UUID, run.reference_example_id)] for run in runs]
return await _aevaluate(
runs,
data=data,
@@ -384,7 +384,7 @@ async def _aevaluate(
)
client = client or rt.get_cached_client()
runs = None if is_async_target else cast(Iterable[schemas.Run], target)
experiment_, runs = await aitertools.aio_to_thread(
experiment_, runs = await asyncio.to_thread(
_resolve_experiment,
experiment,
runs,
@@ -770,7 +770,7 @@ async def _aapply_summary_evaluators(
for result in flattened_results:
feedback = result.dict(exclude={"target_run_id"})
evaluator_info = feedback.pop("evaluator_info", None)
await aitertools.aio_to_thread(
await asyncio.to_thread(
self.client.create_feedback,
**feedback,
run_id=None,
34 changes: 13 additions & 21 deletions python/langsmith/run_helpers.py
@@ -487,7 +487,7 @@ async def async_wrapper(
**kwargs: Any,
) -> Any:
"""Async version of wrapper function."""
run_container = await aitertools.aio_to_thread(
run_container = await asyncio.to_thread(
_setup_run,
func,
container_input=container_input,
@@ -516,19 +516,17 @@ async def async_wrapper(
except BaseException as e:
# shield from cancellation, given we're catching all exceptions
await asyncio.shield(
aitertools.aio_to_thread(_on_run_end, run_container, error=e)
asyncio.to_thread(_on_run_end, run_container, error=e)
)
raise e
await aitertools.aio_to_thread(
_on_run_end, run_container, outputs=function_result
)
await asyncio.to_thread(_on_run_end, run_container, outputs=function_result)
return function_result

@functools.wraps(func)
async def async_generator_wrapper(
*args: Any, langsmith_extra: Optional[LangSmithExtra] = None, **kwargs: Any
) -> AsyncGenerator:
run_container = await aitertools.aio_to_thread(
run_container = await asyncio.to_thread(
_setup_run,
func,
container_input=container_input,
@@ -574,15 +572,15 @@ async def async_generator_wrapper(
yield item
except BaseException as e:
await asyncio.shield(
aitertools.aio_to_thread(
asyncio.to_thread(
_on_run_end,
run_container,
error=e,
outputs=_get_function_result(results, reduce_fn),
)
)
raise e
await aitertools.aio_to_thread(
await asyncio.to_thread(
_on_run_end,
run_container,
outputs=_get_function_result(results, reduce_fn),
@@ -700,7 +698,7 @@ def stream_wrapper(
async def async_stream_wrapper(
*args: Any, langsmith_extra: Optional[LangSmithExtra] = None, **kwargs: Any
) -> Any:
trace_container = await aitertools.aio_to_thread(
trace_container = await asyncio.to_thread(
_setup_run,
func,
container_input=container_input,
@@ -716,7 +714,7 @@ async def async_stream_wrapper(
kwargs.pop("config", None)
stream = await func(*args, **kwargs)
except Exception as e:
await aitertools.aio_to_thread(_on_run_end, trace_container, error=e)
await asyncio.to_thread(_on_run_end, trace_container, error=e)
raise

if hasattr(stream, "__aiter__"):
@@ -726,7 +724,7 @@ async def async_stream_wrapper(
return _TracedStream(stream, trace_container, reduce_fn)

# If it's not iterable, end the trace immediately
await aitertools.aio_to_thread(_on_run_end, trace_container, outputs=stream)
await asyncio.to_thread(_on_run_end, trace_container, outputs=stream)
return stream

if inspect.isasyncgenfunction(func):
@@ -1013,7 +1011,7 @@ async def __aenter__(self) -> run_trees.RunTree:
run_trees.RunTree: The newly created run.
"""
ctx = copy_context()
result = await aitertools.aio_to_thread(self._setup, __ctx=ctx)
result = await asyncio.to_thread(self._setup)
# Set the context for the current thread
_set_tracing_context(get_tracing_context(ctx))
return result
@@ -1034,14 +1032,10 @@ async def __aexit__(
ctx = copy_context()
if exc_type is not None:
await asyncio.shield(
aitertools.aio_to_thread(
self._teardown, exc_type, exc_value, traceback, __ctx=ctx
Review comment (Contributor Author):
@hinthornw ok to remove __ctx here? afaict to_thread already calls copy_context()

Review comment (Collaborator):
i think so but would need to check

)
asyncio.to_thread(self._teardown, exc_type, exc_value, traceback)
)
else:
await aitertools.aio_to_thread(
self._teardown, exc_type, exc_value, traceback, __ctx=ctx
)
await asyncio.to_thread(self._teardown, exc_type, exc_value, traceback)
_set_tracing_context(get_tracing_context(ctx))

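On the review question above about dropping __ctx: asyncio.to_thread copies the caller's contextvars.Context itself (via contextvars.copy_context) before running the function in the executor, so passing a context explicitly is redundant. A small sketch illustrating that propagation; the context variable below is hypothetical and used only for demonstration:

```python
import asyncio
import contextvars

# Hypothetical context variable, defined only to demonstrate propagation.
request_id: contextvars.ContextVar[str] = contextvars.ContextVar(
    "request_id", default="unset"
)


def read_request_id() -> str:
    # Runs in a worker thread, yet sees the value set on the event-loop side,
    # because asyncio.to_thread copies the current context before calling us.
    return request_id.get()


async def main() -> None:
    request_id.set("abc-123")
    value = await asyncio.to_thread(read_request_id)
    assert value == "abc-123"


asyncio.run(main())
```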

@@ -1712,9 +1706,7 @@ def __init__(

async def _aend_trace(self, error: Optional[BaseException] = None):
ctx = copy_context()
await asyncio.shield(
aitertools.aio_to_thread(self._end_trace, error, __ctx=ctx)
)
await asyncio.shield(asyncio.to_thread(self._end_trace, error))
_set_tracing_context(get_tracing_context(ctx))

async def __anext__(self) -> T: