
Commit: Remove unnecessary try

ianmcook committed Mar 12, 2024
1 parent 2ac49c9 commit af4b472
Showing 1 changed file with 12 additions and 13 deletions.
python/pyspark/sql/pandas/conversion.py (12 additions & 13 deletions)
@@ -270,22 +270,21 @@ def _toArrow(self) -> "pa.Table":

         jconf = self.sparkSession._jconf

-        try:
-            from pyspark.sql.pandas.types import to_arrow_schema
-            from pyspark.sql.pandas.utils import require_minimum_pyarrow_version
+        from pyspark.sql.pandas.types import to_arrow_schema
+        from pyspark.sql.pandas.utils import require_minimum_pyarrow_version

-            require_minimum_pyarrow_version()
-            schema = to_arrow_schema(self.schema)
+        require_minimum_pyarrow_version()
+        schema = to_arrow_schema(self.schema)

-            import pyarrow as pa
+        import pyarrow as pa

-            self_destruct = jconf.arrowPySparkSelfDestructEnabled()
-            batches = self._collect_as_arrow(split_batches=self_destruct)
-            table = pa.Table.from_batches(batches, schema=schema)
-            # Ensure only the table has a reference to the batches, so that
-            # self_destruct (if enabled) is effective
-            del batches
-            return table
+        self_destruct = jconf.arrowPySparkSelfDestructEnabled()
+        batches = self._collect_as_arrow(split_batches=self_destruct)
+        table = pa.Table.from_batches(batches, schema=schema)
+        # Ensure only the table has a reference to the batches, so that
+        # self_destruct (if enabled) is effective
+        del batches
+        return table

     def _collect_as_arrow(self, split_batches: bool = False) -> List["pa.RecordBatch"]:
         """
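The comment retained in the new code explains the `del batches` line: when Arrow's self-destruct mode is enabled, each record batch's memory can only be reclaimed if the resulting `pa.Table` holds the sole reference to the batches. Below is a minimal sketch of that pattern using only PyArrow; the data and variable names are illustrative and not part of this commit.

import pyarrow as pa

# Illustrative data, not part of the commit.
batches = [
    pa.RecordBatch.from_pydict({"id": [1, 2, 3]}),
    pa.RecordBatch.from_pydict({"id": [4, 5, 6]}),
]
table = pa.Table.from_batches(batches)

# Drop the extra reference so the table owns the batches exclusively.
# A surviving `batches` reference would keep the Arrow buffers alive and
# defeat the self-destruct conversion below.
del batches

# With self_destruct=True, to_pandas() releases each Arrow buffer as soon
# as its contents have been moved into the pandas DataFrame, reducing peak
# memory use. The table must not be used again after this call.
df = table.to_pandas(self_destruct=True, split_blocks=True, use_threads=False)
print(df)

PyArrow's documentation describes pairing self_destruct=True with split_blocks=True and use_threads=False to maximize the memory that can be freed during conversion; since the table's buffers are released as the conversion proceeds, the table is unusable afterwards.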
