diff --git a/python/pyarrow/tests/interchange/test_conversion.py b/python/pyarrow/tests/interchange/test_conversion.py index 225210d8c7389..af2288425251d 100644 --- a/python/pyarrow/tests/interchange/test_conversion.py +++ b/python/pyarrow/tests/interchange/test_conversion.py @@ -326,7 +326,7 @@ def test_pandas_roundtrip_datetime(unit): @pytest.mark.pandas @pytest.mark.parametrize( - "np_float", [np.float32, np.float64] + "np_float", [np.float32, np.float64] # float16 operations not yet supported ) def test_pandas_to_pyarrow_with_missing(np_float): if Version(pd.__version__) < Version("1.5.0"): diff --git a/python/pyarrow/tests/parquet/common.py b/python/pyarrow/tests/parquet/common.py index 4401d3ca6bb75..ecfe47eefb7e0 100644 --- a/python/pyarrow/tests/parquet/common.py +++ b/python/pyarrow/tests/parquet/common.py @@ -128,6 +128,7 @@ def _test_dataframe(size=10000, seed=0): 'int16': _random_integers(size, np.int16), 'int32': _random_integers(size, np.int32), 'int64': _random_integers(size, np.int64), + 'float16': np.arange(size, dtype=np.float16), 'float32': np.random.randn(size).astype(np.float32), 'float64': np.arange(size, dtype=np.float64), 'bool': np.random.randn(size) > 0, @@ -169,6 +170,7 @@ def alltypes_sample(size=10000, seed=0, categorical=False): 'int16': np.arange(size, dtype=np.int16), 'int32': np.arange(size, dtype=np.int32), 'int64': np.arange(size, dtype=np.int64), + 'float16': np.arange(size, dtype=np.float16), 'float32': np.arange(size, dtype=np.float32), 'float64': np.arange(size, dtype=np.float64), 'bool': np.random.randn(size) > 0, diff --git a/python/pyarrow/tests/parquet/test_basic.py b/python/pyarrow/tests/parquet/test_basic.py index 9bc59cbcf96eb..39120a2b42f8c 100644 --- a/python/pyarrow/tests/parquet/test_basic.py +++ b/python/pyarrow/tests/parquet/test_basic.py @@ -101,7 +101,7 @@ def test_set_dictionary_pagesize_limit(use_legacy_dataset): @pytest.mark.pandas @parametrize_legacy_dataset -def test_chunked_table_write(use_legacy_dataset): 
+def test_chunked_table_write(use_legacy_dataset): # ARROW-232 tables = [] batch = pa.RecordBatch.from_pandas(alltypes_sample(size=10)) diff --git a/python/pyarrow/tests/parquet/test_metadata.py b/python/pyarrow/tests/parquet/test_metadata.py index 342fdb21aed56..090c35973e816 100644 --- a/python/pyarrow/tests/parquet/test_metadata.py +++ b/python/pyarrow/tests/parquet/test_metadata.py @@ -169,6 +169,11 @@ def test_parquet_metadata_lifetime(tempdir): ([-1, 2, 2, None, 4], pa.int16(), 'INT32', -1, 4, 1, 4, 0), ([-1, 2, 2, None, 4], pa.int32(), 'INT32', -1, 4, 1, 4, 0), ([-1, 2, 2, None, 4], pa.int64(), 'INT64', -1, 4, 1, 4, 0), + #( + # [-1.1, 2.2, 2.3, None, 4.4], pa.float16(), + # 'HALFFLOAT', -1.1, 4.4, 1, 4, 0 + #), + # float16 operations are not yet implemented ( [-1.1, 2.2, 2.3, None, 4.4], pa.float32(), 'FLOAT', -1.1, 4.4, 1, 4, 0 diff --git a/python/pyarrow/tests/parquet/test_pandas.py b/python/pyarrow/tests/parquet/test_pandas.py index 0ed305bff1945..4dcc0973a16d3 100644 --- a/python/pyarrow/tests/parquet/test_pandas.py +++ b/python/pyarrow/tests/parquet/test_pandas.py @@ -247,6 +247,7 @@ def test_pandas_parquet_pyfile_roundtrip(tempdir, use_legacy_dataset): size = 5 df = pd.DataFrame({ 'int64': np.arange(size, dtype=np.int64), + 'float16': np.arange(size, dtype=np.float16), 'float32': np.arange(size, dtype=np.float32), 'float64': np.arange(size, dtype=np.float64), 'bool': np.random.randn(size) > 0, @@ -279,6 +280,7 @@ def test_pandas_parquet_configuration_options(tempdir, use_legacy_dataset): 'int16': np.arange(size, dtype=np.int16), 'int32': np.arange(size, dtype=np.int32), 'int64': np.arange(size, dtype=np.int64), + 'float16': np.arange(size, dtype=np.float16), 'float32': np.arange(size, dtype=np.float32), 'float64': np.arange(size, dtype=np.float64), 'bool': np.random.randn(size) > 0 diff --git a/python/pyarrow/tests/test_array.py b/python/pyarrow/tests/test_array.py index ed29bf5cae688..c728a842c1137 100644 --- a/python/pyarrow/tests/test_array.py 
+++ b/python/pyarrow/tests/test_array.py @@ -1447,6 +1447,7 @@ def test_cast_integers_unsafe(): def test_floating_point_truncate_safe(): safe_cases = [ + # float16 does not support casts yet (np.array([1.0, 2.0, 3.0], dtype='float32'), 'float32', np.array([1, 2, 3], dtype='i4'), pa.int32()), (np.array([1.0, 2.0, 3.0], dtype='float64'), 'float64',