
Commit

Merge branch 'pandas-dev:main' into Fix#58748
SiddheshBangar authored May 21, 2024
2 parents 5ad5d89 + 695b170 commit 19a07c3
Showing 9 changed files with 247 additions and 166 deletions.
1 change: 0 additions & 1 deletion ci/code_checks.sh
@@ -440,7 +440,6 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
-i "pandas.errors.UnsortedIndexError SA01" \
-i "pandas.errors.UnsupportedFunctionCall SA01" \
-i "pandas.errors.ValueLabelTypeMismatch SA01" \
-i "pandas.get_option SA01" \
-i "pandas.infer_freq SA01" \
-i "pandas.interval_range RT03" \
-i "pandas.io.formats.style.Styler.apply RT03" \
2 changes: 1 addition & 1 deletion doc/source/user_guide/merging.rst
@@ -763,7 +763,7 @@ Joining a single Index to a MultiIndex
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You can join a :class:`DataFrame` with a :class:`Index` to a :class:`DataFrame` with a :class:`MultiIndex` on a level.
The ``name`` of the :class:`Index` with match the level name of the :class:`MultiIndex`.
The ``name`` of the :class:`Index` will match the level name of the :class:`MultiIndex`.

.. ipython:: python
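As context, a minimal sketch of the join described above, with hypothetical frames (not taken from this commit):

import pandas as pd

# Single-Index frame whose index name ("abc") matches a MultiIndex level name.
left = pd.DataFrame({"v1": range(3)}, index=pd.Index(list("abc"), name="abc"))
right = pd.DataFrame(
    {"v2": range(6)},
    index=pd.MultiIndex.from_product([list("abc"), list("xy")], names=["abc", "xy"]),
)
# join matches the Index name to the "abc" level; the result keeps the MultiIndex.
result = left.join(right, how="inner")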
6 changes: 6 additions & 0 deletions pandas/_config/config.py
@@ -157,6 +157,12 @@ def get_option(pat: str) -> Any:
------
OptionError : if no such option exists

See Also
--------
set_option : Set the value of the specified option or options.
reset_option : Reset one or more options to their default value.
describe_option : Print the description for one or more registered options.

Notes
-----
For all available options, please view the :ref:`User Guide <options.available>`
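As context, a quick sketch of the four option functions cross-referenced above (not part of this commit):

import pandas as pd

pd.set_option("display.max_rows", 100)           # set a registered option
assert pd.get_option("display.max_rows") == 100  # read it back
pd.reset_option("display.max_rows")              # restore the default
pd.describe_option("display.max_rows")           # print the option's description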
129 changes: 78 additions & 51 deletions pandas/core/reshape/reshape.py
@@ -925,27 +925,99 @@ def _reorder_for_extension_array_stack(
def stack_v3(frame: DataFrame, level: list[int]) -> Series | DataFrame:
if frame.columns.nunique() != len(frame.columns):
raise ValueError("Columns with duplicate values are not supported in stack")

# If we need to drop `level` from columns, it needs to be in descending order
set_levels = set(level)
drop_levnums = sorted(level, reverse=True)
stack_cols = frame.columns._drop_level_numbers(
[k for k in range(frame.columns.nlevels - 1, -1, -1) if k not in set_levels]
)

result = stack_reshape(frame, level, set_levels, stack_cols)

# Construct the correct MultiIndex by combining the frame's index and
# stacked columns.
ratio = 0 if frame.empty else len(result) // len(frame)

index_levels: list | FrozenList
if isinstance(frame.index, MultiIndex):
index_levels = frame.index.levels
index_codes = list(np.tile(frame.index.codes, (1, ratio)))
else:
codes, uniques = factorize(frame.index, use_na_sentinel=False)
index_levels = [uniques]
index_codes = list(np.tile(codes, (1, ratio)))

if len(level) > 1:
# Arrange columns in the order we want to take them, e.g. level=[2, 0, 1]
sorter = np.argsort(level)
assert isinstance(stack_cols, MultiIndex)
ordered_stack_cols = stack_cols._reorder_ilevels(sorter)
else:
ordered_stack_cols = stack_cols

stack_cols_unique = stack_cols.unique()
ordered_stack_cols_unique = ordered_stack_cols.unique()
if isinstance(ordered_stack_cols, MultiIndex):
column_levels = ordered_stack_cols.levels
column_codes = ordered_stack_cols.drop_duplicates().codes
else:
column_levels = [ordered_stack_cols_unique]
column_codes = [factorize(ordered_stack_cols_unique, use_na_sentinel=False)[0]]

# error: Incompatible types in assignment (expression has type "list[ndarray[Any,
# dtype[Any]]]", variable has type "FrozenList")
column_codes = [np.repeat(codes, len(frame)) for codes in column_codes] # type: ignore[assignment]
result.index = MultiIndex(
levels=index_levels + column_levels,
codes=index_codes + column_codes,
names=frame.index.names + list(ordered_stack_cols.names),
verify_integrity=False,
)

# sort result, but faster than calling sort_index since we know the order we need
len_df = len(frame)
n_uniques = len(ordered_stack_cols_unique)
indexer = np.arange(n_uniques)
idxs = np.tile(len_df * indexer, len_df) + np.repeat(np.arange(len_df), n_uniques)
result = result.take(idxs)

# Reshape/rename if needed and dropna
if result.ndim == 2 and frame.columns.nlevels == len(level):
if len(result.columns) == 0:
result = Series(index=result.index)
else:
result = result.iloc[:, 0]
if result.ndim == 1:
result.name = None

return result


def stack_reshape(
frame: DataFrame, level: list[int], set_levels: set[int], stack_cols: Index
) -> Series | DataFrame:
"""Reshape the data of a frame for stack.
This function takes care of most of the work that stack needs to do. Caller
will sort the result once the appropriate index is set.
Parameters
----------
frame: DataFrame
DataFrame that is to be stacked.
level: list of ints.
Levels of the columns to stack.
set_levels: set of ints.
Same as level, but as a set.
stack_cols: Index.
Columns of the result when the DataFrame is stacked.
Returns
-------
The data of behind the stacked DataFrame.
"""
# If we need to drop `level` from columns, it needs to be in descending order
drop_levnums = sorted(level, reverse=True)

# Grab data for each unique index to be stacked
buf = []
for idx in stack_cols_unique:
for idx in stack_cols.unique():
if len(frame.columns) == 1:
data = frame.copy()
else:
@@ -972,10 +1044,8 @@ def stack_v3(frame: DataFrame, level: list[int]) -> Series | DataFrame:
data.columns = RangeIndex(len(data.columns))
buf.append(data)

result: Series | DataFrame
if len(buf) > 0 and not frame.empty:
result = concat(buf, ignore_index=True)
ratio = len(result) // len(frame)
else:
# input is empty
if len(level) < frame.columns.nlevels:
@@ -984,54 +1054,11 @@ def stack_v3(frame: DataFrame, level: list[int]) -> Series | DataFrame:
else:
new_columns = [0]
result = DataFrame(columns=new_columns, dtype=frame._values.dtype)
ratio = 0

if len(level) < frame.columns.nlevels:
# concat column order may be different from dropping the levels
desired_columns = frame.columns._drop_level_numbers(drop_levnums).unique()
if not result.columns.equals(desired_columns):
result = result[desired_columns]

# Construct the correct MultiIndex by combining the frame's index and
# stacked columns.
index_levels: list | FrozenList
if isinstance(frame.index, MultiIndex):
index_levels = frame.index.levels
index_codes = list(np.tile(frame.index.codes, (1, ratio)))
else:
codes, uniques = factorize(frame.index, use_na_sentinel=False)
index_levels = [uniques]
index_codes = list(np.tile(codes, (1, ratio)))
if isinstance(ordered_stack_cols, MultiIndex):
column_levels = ordered_stack_cols.levels
column_codes = ordered_stack_cols.drop_duplicates().codes
else:
column_levels = [ordered_stack_cols.unique()]
column_codes = [factorize(ordered_stack_cols_unique, use_na_sentinel=False)[0]]
# error: Incompatible types in assignment (expression has type "list[ndarray[Any,
# dtype[Any]]]", variable has type "FrozenList")
column_codes = [np.repeat(codes, len(frame)) for codes in column_codes] # type: ignore[assignment]
result.index = MultiIndex(
levels=index_levels + column_levels,
codes=index_codes + column_codes,
names=frame.index.names + list(ordered_stack_cols.names),
verify_integrity=False,
)

# sort result, but faster than calling sort_index since we know the order we need
len_df = len(frame)
n_uniques = len(ordered_stack_cols_unique)
indexer = np.arange(n_uniques)
idxs = np.tile(len_df * indexer, len_df) + np.repeat(np.arange(len_df), n_uniques)
result = result.take(idxs)

# Reshape/rename if needed and dropna
if result.ndim == 2 and frame.columns.nlevels == len(level):
if len(result.columns) == 0:
result = Series(index=result.index)
else:
result = result.iloc[:, 0]
if result.ndim == 1:
result.name = None

return result
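As context for the "sort result" shortcut in stack_v3 above: after concat, rows are grouped by stacked column, and the precomputed indexer interleaves them back into row-major order without a full sort_index call. A small sketch with made-up sizes (not part of this commit):

import numpy as np

# Assume len_df = 3 original rows and n_uniques = 2 stacked columns, so the
# concatenated result is ordered [r0c0, r1c0, r2c0, r0c1, r1c1, r2c1].
len_df, n_uniques = 3, 2
indexer = np.arange(n_uniques)
idxs = np.tile(len_df * indexer, len_df) + np.repeat(np.arange(len_df), n_uniques)
print(idxs)  # [0 3 1 4 2 5] -> r0c0, r0c1, r1c0, r1c1, r2c0, r2c1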
55 changes: 34 additions & 21 deletions pandas/tests/groupby/test_apply.py
@@ -322,6 +322,8 @@ def test_groupby_as_index_apply():
tm.assert_index_equal(res_as_apply, exp_as_apply)
tm.assert_index_equal(res_not_as_apply, exp_not_as_apply)


def test_groupby_as_index_apply_str():
ind = Index(list("abcde"))
df = DataFrame([[1, 2], [2, 3], [1, 4], [1, 5], [2, 6]], index=ind)
msg = "DataFrameGroupBy.apply operated on the grouping columns"
@@ -379,8 +381,8 @@ def f(piece):
{"value": piece, "demeaned": piece - piece.mean(), "logged": logged}
)

dr = bdate_range("1/1/2000", periods=100)
ts = Series(np.random.default_rng(2).standard_normal(100), index=dr)
dr = bdate_range("1/1/2000", periods=10)
ts = Series(np.random.default_rng(2).standard_normal(10), index=dr)

grouped = ts.groupby(lambda x: x.month, group_keys=False)
result = grouped.apply(f)
@@ -639,13 +641,13 @@ def reindex_helper(x):
def test_apply_corner_cases():
# #535, can't use sliding iterator

N = 1000
N = 10
labels = np.random.default_rng(2).integers(0, 100, size=N)
df = DataFrame(
{
"key": labels,
"value1": np.random.default_rng(2).standard_normal(N),
"value2": ["foo", "bar", "baz", "qux"] * (N // 4),
"value2": ["foo", "bar", "baz", "qux", "a"] * (N // 5),
}
)

@@ -680,6 +682,8 @@ def test_apply_numeric_coercion_when_datetime():
result = df.groupby(["Number"]).apply(lambda x: x.iloc[0])
tm.assert_series_equal(result["Str"], expected["Str"])


def test_apply_numeric_coercion_when_datetime_getitem():
# GH 15421
df = DataFrame(
{"A": [10, 20, 30], "B": ["foo", "3", "4"], "T": [pd.Timestamp("12:31:22")] * 3}
@@ -695,6 +699,8 @@ def get_B(g):
expected.index = df.A
tm.assert_series_equal(result, expected)


def test_apply_numeric_coercion_when_datetime_with_nat():
# GH 14423
def predictions(tool):
out = Series(index=["p1", "p2", "useTime"], dtype=object)
@@ -843,10 +849,24 @@ def test_func(x):
tm.assert_frame_equal(result, expected)


def test_groupby_apply_none_first():
@pytest.mark.parametrize(
"in_data, out_idx, out_data",
[
[
{"groups": [1, 1, 1, 2], "vars": [0, 1, 2, 3]},
[[1, 1], [0, 2]],
{"groups": [1, 1], "vars": [0, 2]},
],
[
{"groups": [1, 2, 2, 2], "vars": [0, 1, 2, 3]},
[[2, 2], [1, 3]],
{"groups": [2, 2], "vars": [1, 3]},
],
],
)
def test_groupby_apply_none_first(in_data, out_idx, out_data):
# GH 12824. Tests if apply returns None first.
test_df1 = DataFrame({"groups": [1, 1, 1, 2], "vars": [0, 1, 2, 3]})
test_df2 = DataFrame({"groups": [1, 2, 2, 2], "vars": [0, 1, 2, 3]})
test_df1 = DataFrame(in_data)

def test_func(x):
if x.shape[0] < 2:
@@ -856,14 +876,9 @@ def test_func(x):
msg = "DataFrameGroupBy.apply operated on the grouping columns"
with tm.assert_produces_warning(DeprecationWarning, match=msg):
result1 = test_df1.groupby("groups").apply(test_func)
with tm.assert_produces_warning(DeprecationWarning, match=msg):
result2 = test_df2.groupby("groups").apply(test_func)
index1 = MultiIndex.from_arrays([[1, 1], [0, 2]], names=["groups", None])
index2 = MultiIndex.from_arrays([[2, 2], [1, 3]], names=["groups", None])
expected1 = DataFrame({"groups": [1, 1], "vars": [0, 2]}, index=index1)
expected2 = DataFrame({"groups": [2, 2], "vars": [1, 3]}, index=index2)
index1 = MultiIndex.from_arrays(out_idx, names=["groups", None])
expected1 = DataFrame(out_data, index=index1)
tm.assert_frame_equal(result1, expected1)
tm.assert_frame_equal(result2, expected2)


def test_groupby_apply_return_empty_chunk():
@@ -883,18 +898,16 @@ def test_groupby_apply_return_empty_chunk():
tm.assert_series_equal(result, expected)


def test_apply_with_mixed_types():
@pytest.mark.parametrize("meth", ["apply", "transform"])
def test_apply_with_mixed_types(meth):
# gh-20949
df = DataFrame({"A": "a a b".split(), "B": [1, 2, 3], "C": [4, 6, 5]})
g = df.groupby("A", group_keys=False)

result = g.transform(lambda x: x / x.sum())
result = getattr(g, meth)(lambda x: x / x.sum())
expected = DataFrame({"B": [1 / 3.0, 2 / 3.0, 1], "C": [0.4, 0.6, 1.0]})
tm.assert_frame_equal(result, expected)

result = g.apply(lambda x: x / x.sum())
tm.assert_frame_equal(result, expected)


def test_func_returns_object():
# GH 28652
@@ -1106,7 +1119,7 @@ def test_apply_function_with_indexing_return_column():

@pytest.mark.parametrize(
"udf",
[(lambda x: x.copy()), (lambda x: x.copy().rename(lambda y: y + 1))],
[lambda x: x.copy(), lambda x: x.copy().rename(lambda y: y + 1)],
)
@pytest.mark.parametrize("group_keys", [True, False])
def test_apply_result_type(group_keys, udf):
@@ -1214,7 +1227,7 @@ def test_apply_with_date_in_multiindex_does_not_convert_to_timestamp():
expected = df.iloc[[0, 2, 3]]
expected = expected.reset_index()
expected.index = MultiIndex.from_frame(expected[["A", "B", "idx"]])
expected = expected.drop(columns="idx")
expected = expected.drop(columns=["idx"])

tm.assert_frame_equal(result, expected)
for val in result.index.levels[1]:
