Assorted cleanups (#26975)
jbrockmendel authored and jreback committed Jun 21, 2019
1 parent cfd65e9 commit a4a18a9
Showing 5 changed files with 15 additions and 28 deletions.
19 changes: 0 additions & 19 deletions pandas/core/internals/managers.py
@@ -23,7 +23,6 @@
from pandas.core.dtypes.missing import isna

import pandas.core.algorithms as algos
from pandas.core.arrays.sparse import _maybe_to_sparse
from pandas.core.base import PandasObject
from pandas.core.index import Index, MultiIndex, ensure_index
from pandas.core.indexing import maybe_convert_indices
@@ -1727,10 +1726,6 @@ def form_blocks(arrays, names, axes):
object_blocks = _simple_blockify(items_dict['ObjectBlock'], np.object_)
blocks.extend(object_blocks)

if len(items_dict['SparseBlock']) > 0:
sparse_blocks = _sparse_blockify(items_dict['SparseBlock'])
blocks.extend(sparse_blocks)

if len(items_dict['CategoricalBlock']) > 0:
cat_blocks = [make_block(array, klass=CategoricalBlock, placement=[i])
for i, _, array in items_dict['CategoricalBlock']]
@@ -1797,20 +1792,6 @@ def _multi_blockify(tuples, dtype=None):
return new_blocks


def _sparse_blockify(tuples, dtype=None):
""" return an array of blocks that potentially have different dtypes (and
are sparse)
"""

new_blocks = []
for i, names, array in tuples:
array = _maybe_to_sparse(array)
block = make_block(array, placement=[i])
new_blocks.append(block)

return new_blocks


def _stack_arrays(tuples, dtype):

# fml
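
For context, and not taken from the diff: by this point SparseArray is an ExtensionArray, so DataFrame construction already routes sparse columns through the generic extension-array path in form_blocks, which is why the dedicated SparseBlock handling and the _sparse_blockify/_maybe_to_sparse helpers can be dropped. A minimal sketch of that behavior, assuming a pandas version around this commit where pd.SparseArray is the public constructor:

import pandas as pd

# Sketch only: a sparse column is carried as an extension array, so no
# SparseBlock-specific helper is needed during construction.
sparse_col = pd.SparseArray([0, 0, 1, 0])
df = pd.DataFrame({"a": sparse_col, "b": [1, 2, 3, 4]})
print(df.dtypes)   # "a" keeps a Sparse dtype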
2 changes: 1 addition & 1 deletion pandas/io/formats/format.py
@@ -1567,7 +1567,7 @@ def __call__(self, num):

formatted = format_str.format(mant=mant, prefix=prefix)

return formatted # .strip()
return formatted


def set_eng_float_format(accuracy=3, use_eng_prefix=False):
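
As a usage note, not part of the diff: the __call__ shown above belongs to the engineering-notation formatter installed by set_eng_float_format (defined just below the hunk); the change only drops a stale commented-out .strip(). A small usage sketch, with the rendered output in the comment being approximate:

import pandas as pd

# Install the engineering formatter whose __call__ is patched above.
pd.set_eng_float_format(accuracy=3, use_eng_prefix=True)
print(pd.Series([1.5e6, 2.5e-3]))   # roughly: 1.500M and 2.500m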
2 changes: 1 addition & 1 deletion pandas/io/sql.py
@@ -623,7 +623,7 @@ def insert_data(self):
# GH 9086: Ensure we return datetimes with timezone info
# Need to return 2-D data; DatetimeIndex is 1D
d = b.values.to_pydatetime()
d = np.expand_dims(d, axis=0)
d = np.atleast_2d(d)
else:
# convert to microsecond resolution for datetime.datetime
d = b.values.astype('M8[us]').astype(object)
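
A quick sketch, not from the diff, of why the swap is behavior-preserving here: to_pydatetime() yields a 1-D object array, and for 1-D input np.atleast_2d adds the same leading axis that np.expand_dims(..., axis=0) did.

import numpy as np

# Stand-in for the 1-D array of datetime objects built in insert_data.
d = np.array([1, 2, 3], dtype=object)
assert np.expand_dims(d, axis=0).shape == (1, 3)
assert np.atleast_2d(d).shape == (1, 3)
assert (np.expand_dims(d, axis=0) == np.atleast_2d(d)).all()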
5 changes: 3 additions & 2 deletions pandas/tests/frame/test_constructors.py
@@ -5,6 +5,7 @@

import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
import pytest

from pandas.compat import PY36, is_platform_little_endian
@@ -839,7 +840,7 @@ def test_constructor_maskedrecarray_dtype(self):
data = np.ma.array(
np.ma.zeros(5, dtype=[('date', '<f8'), ('price', '<f8')]),
mask=[False] * 5)
data = data.view(ma.mrecords.mrecarray)
data = data.view(mrecords.mrecarray)
result = pd.DataFrame(data, dtype=int)
expected = pd.DataFrame(np.zeros((5, 2), dtype=int),
columns=['date', 'price'])
@@ -868,7 +869,7 @@ def test_constructor_mrecarray(self):
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = ma.mrecords.fromarrays(data, names=names)
mrecs = mrecords.fromarrays(data, names=names)

# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
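
For illustration, a sketch rather than test code: the tests now import numpy.ma.mrecords explicitly instead of reaching it as the ma.mrecords attribute, which only resolves if the submodule happens to have been imported somewhere already.

import numpy.ma as ma
import numpy.ma.mrecords as mrecords   # explicit import, as the tests now do

data = ma.array(ma.zeros(5, dtype=[('date', '<f8'), ('price', '<f8')]),
                mask=[False] * 5)
rec = data.view(mrecords.mrecarray)    # same class the old ma.mrecords lookup gave
print(type(rec))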
15 changes: 10 additions & 5 deletions pandas/tests/frame/test_missing.py
@@ -241,14 +241,15 @@ def test_fillna_mixed_float(self, mixed_float_frame):
result = mf.fillna(method='pad')
_check_mixed_float(result, dtype=dict(C=None))

def test_fillna_other(self):
def test_fillna_empty(self):
# empty frame (GH #2778)
df = DataFrame(columns=['x'])
for m in ['pad', 'backfill']:
df.x.fillna(method=m, inplace=True)
df.x.fillna(method=m)

# with different dtype (GH3386)
def test_fillna_different_dtype(self):
# with different dtype (GH#3386)
df = DataFrame([['a', 'a', np.nan, 'a'], [
'b', 'b', np.nan, 'b'], ['c', 'c', np.nan, 'c']])

@@ -261,6 +262,7 @@ def test_fillna_other(self):
df.fillna({2: 'foo'}, inplace=True)
assert_frame_equal(df, expected)

def test_fillna_limit_and_value(self):
# limit and value
df = DataFrame(np.random.randn(10, 3))
df.iloc[2:7, 0] = np.nan
@@ -272,8 +274,9 @@ def test_fillna_other(self):
result = df.fillna(999, limit=1)
assert_frame_equal(result, expected)

def test_fillna_datelike(self):
# with datelike
# GH 6344
# GH#6344
df = DataFrame({
'Date': [pd.NaT, Timestamp("2014-1-1")],
'Date2': [Timestamp("2013-1-1"), pd.NaT]
@@ -285,8 +288,9 @@ def test_fillna_other(self):
result = df.fillna(value={'Date': df['Date2']})
assert_frame_equal(result, expected)

def test_fillna_tzaware(self):
# with timezone
# GH 15855
# GH#15855
df = pd.DataFrame({'A': [pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.NaT]})
exp = pd.DataFrame({'A': [pd.Timestamp('2012-11-11 00:00:00+01:00'),
@@ -299,8 +303,9 @@ def test_fillna_other(self):
pd.Timestamp('2012-11-11 00:00:00+01:00')]})
assert_frame_equal(df.fillna(method='bfill'), exp)

def test_fillna_tzaware_different_column(self):
# with timezone in another column
# GH 15522
# GH#15522
df = pd.DataFrame({'A': pd.date_range('20130101', periods=4,
tz='US/Eastern'),
'B': [1, 2, np.nan, np.nan]})
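
To illustrate one behavior now covered by its own test, a sketch with made-up values rather than the values from the test file: DataFrame.fillna with a limit fills at most that many NaNs per column.

import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [np.nan, np.nan, 3.0]})
print(df.fillna(999, limit=1))   # only the first NaN in the column is filled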
