Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

BUG/REG: RollingGroupby MultiIndex levels dropped #38737

Merged
merged 6 commits into from
Dec 29, 2020
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion doc/source/whatsnew/v1.2.1.rst
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ including other versions of pandas.

Fixed regressions
~~~~~~~~~~~~~~~~~
-
- Fixed a regression in ``groupby().rolling()`` where :class:`MultiIndex` levels were dropped (:issue:`38523`)
-

.. ---------------------------------------------------------------------------
Expand Down
2 changes: 1 addition & 1 deletion pandas/core/shared_docs.py
Original file line number Diff line number Diff line change
Expand Up @@ -108,7 +108,7 @@
Note this does not influence the order of observations within each
group. Groupby preserves the order of rows within each group.
group_keys : bool, default True
When calling apply, add group keys to index to identify pieces.
When calling ``groupby().apply()``, add group keys to index to identify pieces.
squeeze : bool, default False
Reduce the dimensionality of the return type if possible,
otherwise return a consistent type.
Expand Down
28 changes: 11 additions & 17 deletions pandas/core/window/rolling.py
Original file line number Diff line number Diff line change
Expand Up @@ -775,28 +775,22 @@ def _apply(
numba_cache_key,
**kwargs,
)
# Reconstruct the resulting MultiIndex from tuples
# Reconstruct the resulting MultiIndex
# 1st set of levels = group by labels
# 2nd set of levels = original index
# Ignore 2nd set of levels if a group by label include an index level
result_index_names = [
grouping.name for grouping in self._groupby.grouper._groupings
]
grouped_object_index = None
# 2nd set of levels = original DataFrame/Series index
grouped_object_index = self.obj.index
grouped_index_name = [*grouped_object_index.names]
groupby_keys = [grouping.name for grouping in self._groupby.grouper._groupings]
result_index_names = groupby_keys + grouped_index_name

column_keys = [
drop_columns = [
key
for key in result_index_names
for key in groupby_keys
if key not in self.obj.index.names or key is None
]

if len(column_keys) == len(result_index_names):
grouped_object_index = self.obj.index
grouped_index_name = [*grouped_object_index.names]
result_index_names += grouped_index_name
else:
# Our result will have still kept the column in the result
result = result.drop(columns=column_keys, errors="ignore")
if len(drop_columns) != len(groupby_keys):
# Our result will have kept groupby columns which should be dropped
result = result.drop(columns=drop_columns, errors="ignore")

codes = self._groupby.grouper.codes
levels = self._groupby.grouper.levels
Expand Down
47 changes: 42 additions & 5 deletions pandas/tests/window/test_groupby.py
Original file line number Diff line number Diff line change
Expand Up @@ -556,23 +556,31 @@ def test_groupby_rolling_nans_in_index(self, rollings, key):
with pytest.raises(ValueError, match=f"{key} must be monotonic"):
df.groupby("c").rolling("60min", **rollings)

def test_groupby_rolling_group_keys(self):
@pytest.mark.parametrize("group_keys", [True, False])
def test_groupby_rolling_group_keys(self, group_keys):
# GH 37641
# GH 38523: GH 37641 actually was not a bug.
# group_keys only applies to groupby.apply directly
arrays = [["val1", "val1", "val2"], ["val1", "val1", "val2"]]
index = MultiIndex.from_arrays(arrays, names=("idx1", "idx2"))

s = Series([1, 2, 3], index=index)
result = s.groupby(["idx1", "idx2"], group_keys=False).rolling(1).mean()
result = s.groupby(["idx1", "idx2"], group_keys=group_keys).rolling(1).mean()
expected = Series(
[1.0, 2.0, 3.0],
index=MultiIndex.from_tuples(
[("val1", "val1"), ("val1", "val1"), ("val2", "val2")],
names=["idx1", "idx2"],
[
("val1", "val1", "val1", "val1"),
("val1", "val1", "val1", "val1"),
("val2", "val2", "val2", "val2"),
],
names=["idx1", "idx2", "idx1", "idx2"],
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Are we sure this is the behaviour we want?
It might be the most consistent option, but it's also rather useless to repeat those index levels... So we should at least have a way to avoid getting that?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I would say the consistency is more maintainable on our end compared to additionally including logic to de-duplicate index levels given some condition.

I would prefer the user explicitly call droplevel.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

hmm

In [134]: df.groupby(level=0).transform('max')                                                                                                          
Out[134]: 
                Max Speed
Animal Type              
Falcon Captive      390.0
       Wild         390.0
Parrot Captive       30.0
       Wild          30.0

so i think we are doing some magic in groupby for this. These are conceptually similar operations.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Before indexers were implemented for groupby().rolling(), this was the result:

In [1]: import pandas as pd

In [2]: pd.__version__
Out[2]: '1.0.5'

In [3]: from pandas import *

In [4]:         arrays = [
   ...:             ["Falcon", "Falcon", "Parrot", "Parrot"],
   ...:             ["Captive", "Wild", "Captive", "Wild"],
   ...:         ]
   ...:         index = MultiIndex.from_arrays(arrays, names=("Animal", "Type"))
   ...:         df = DataFrame({"Max Speed": [390.0, 350.0, 30.0, 20.0]}, index=index)
   ...:         result = df.groupby(level=0)["Max Speed"].rolling(2).sum()

In [5]: result
Out[5]:
Animal  Animal  Type
Falcon  Falcon  Captive      NaN
                Wild       740.0
Parrot  Parrot  Captive      NaN
                Wild        50.0
Name: Max Speed, dtype: float64

which I think we should be trying to match. Though I'm not sure if we have solid conventions of the resulting index when using groupby.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

yep i see that. ok i think that we should revert for 1.2.x and then decide for 1.3 is prob ok. i am leaning towards have the same as groupby here.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Okay once this is merged in I can create another issue to discuss what the index behavior should be for groupby rolling with duplicate index levels.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

When comparing to 1.0.5 behaviour, we also had:

In [5]: s.groupby(["idx1", "idx2"], group_keys=False).rolling(1).mean() 
Out[5]: 
idx1  idx2
val1  val1    1.0
      val1    2.0
val2  val2    3.0
dtype: float64

In [9]: pd.__version__   
Out[9]: '1.0.5'

So this PR then deviates from that for this case.

(I know the influence of group_keys=False has been considered a bug, but we could also reconsider that, since the above seems to actually give the desired result?)

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The difference in the group_keys result, I think, is due to the implementation change in groupby().rolling()

Before groupby().rolling() under the hood was groupby().apply(lambda x: x.rolling()...) and therefore group_keys impacted the result (since the argument is only applicable for groupby().apply()).

After groupby().rolling() moved away from using apply, group_keys didn't impact the result.

So IMO, group_keys shouldn't have ever really influenced the result since groupby().apply() was never called directly from the user.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

can you add testing for group_keys in any event?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

),
)
tm.assert_series_equal(result, expected)

def test_groupby_rolling_index_level_and_column_label(self):
# The groupby keys should not appear as a resulting column
arrays = [["val1", "val1", "val2"], ["val1", "val1", "val2"]]
index = MultiIndex.from_arrays(arrays, names=("idx1", "idx2"))

Expand All @@ -581,7 +589,12 @@ def test_groupby_rolling_index_level_and_column_label(self):
expected = DataFrame(
{"B": [0.0, 1.0, 2.0]},
index=MultiIndex.from_tuples(
[("val1", 1), ("val1", 1), ("val2", 2)], names=["idx1", "A"]
[
("val1", 1, "val1", "val1"),
("val1", 1, "val1", "val1"),
("val2", 2, "val2", "val2"),
],
names=["idx1", "A", "idx1", "idx2"],
),
)
tm.assert_frame_equal(result, expected)
Expand Down Expand Up @@ -640,6 +653,30 @@ def test_groupby_rolling_resulting_multiindex(self):
)
tm.assert_index_equal(result.index, expected_index)

def test_groupby_level(self):
# GH 38523
arrays = [
["Falcon", "Falcon", "Parrot", "Parrot"],
["Captive", "Wild", "Captive", "Wild"],
]
index = MultiIndex.from_arrays(arrays, names=("Animal", "Type"))
df = DataFrame({"Max Speed": [390.0, 350.0, 30.0, 20.0]}, index=index)
result = df.groupby(level=0)["Max Speed"].rolling(2).sum()
expected = Series(
[np.nan, 740.0, np.nan, 50.0],
index=MultiIndex.from_tuples(
[
("Falcon", "Falcon", "Captive"),
("Falcon", "Falcon", "Wild"),
("Parrot", "Parrot", "Captive"),
("Parrot", "Parrot", "Wild"),
],
names=["Animal", "Animal", "Type"],
),
name="Max Speed",
)
tm.assert_series_equal(result, expected)


class TestExpanding:
def setup_method(self):
Expand Down