
Commit

CLN: C408 Unnecessary dict call - rewrite as a literal #38138 (#38383)
* last dict fixings

* last dict fixings

* last dict fixings

* last dict fixings
UrielMaD authored Dec 9, 2020
1 parent 32bebdb commit b3ed7f9
Showing 10 changed files with 163 additions and 159 deletions.
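
Every hunk below applies the same mechanical rewrite: flake8-comprehensions rule C408 flags dict() calls built from keyword arguments, since an equivalent dict literal states the mapping directly and skips a builtin-name lookup plus a function call. A minimal sketch of the pattern (variable and key names are illustrative, not taken from any one file in this commit):

# Flagged by C408: dict() built from keyword arguments
kwargs = dict(sep=",", skiprows=2)

# Preferred literal form -- same mapping, keys become explicit strings
kwargs = {"sep": ",", "skiprows": 2}

The keyword form only accepts keys that are valid Python identifiers, which is why every key in the rewritten code gains quotes.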
118 changes: 59 additions & 59 deletions pandas/tests/io/generate_legacy_storage_files.py
@@ -142,16 +142,16 @@ def create_data():
         "E": [0.0, 1, Timestamp("20100101"), "foo", 2.0],
     }

-    scalars = dict(timestamp=Timestamp("20130101"), period=Period("2012", "M"))
-
-    index = dict(
-        int=Index(np.arange(10)),
-        date=date_range("20130101", periods=10),
-        period=period_range("2013-01-01", freq="M", periods=10),
-        float=Index(np.arange(10, dtype=np.float64)),
-        uint=Index(np.arange(10, dtype=np.uint64)),
-        timedelta=timedelta_range("00:00:00", freq="30T", periods=10),
-    )
+    scalars = {"timestamp": Timestamp("20130101"), "period": Period("2012", "M")}
+
+    index = {
+        "int": Index(np.arange(10)),
+        "date": date_range("20130101", periods=10),
+        "period": period_range("2013-01-01", freq="M", periods=10),
+        "float": Index(np.arange(10, dtype=np.float64)),
+        "uint": Index(np.arange(10, dtype=np.uint64)),
+        "timedelta": timedelta_range("00:00:00", freq="30T", periods=10),
+    }

     index["range"] = RangeIndex(10)

@@ -160,8 +160,8 @@ def create_data():

     index["interval"] = interval_range(0, periods=10)

-    mi = dict(
-        reg2=MultiIndex.from_tuples(
+    mi = {
+        "reg2": MultiIndex.from_tuples(
             tuple(
                 zip(
                     *[
@@ -172,35 +172,35 @@ def create_data():
             ),
             names=["first", "second"],
         )
-    )
+    }

-    series = dict(
-        float=Series(data["A"]),
-        int=Series(data["B"]),
-        mixed=Series(data["E"]),
-        ts=Series(
+    series = {
+        "float": Series(data["A"]),
+        "int": Series(data["B"]),
+        "mixed": Series(data["E"]),
+        "ts": Series(
             np.arange(10).astype(np.int64), index=date_range("20130101", periods=10)
         ),
-        mi=Series(
+        "mi": Series(
             np.arange(5).astype(np.float64),
             index=MultiIndex.from_tuples(
                 tuple(zip(*[[1, 1, 2, 2, 2], [3, 4, 3, 4, 5]])), names=["one", "two"]
             ),
         ),
-        dup=Series(np.arange(5).astype(np.float64), index=["A", "B", "C", "D", "A"]),
-        cat=Series(Categorical(["foo", "bar", "baz"])),
-        dt=Series(date_range("20130101", periods=5)),
-        dt_tz=Series(date_range("20130101", periods=5, tz="US/Eastern")),
-        period=Series([Period("2000Q1")] * 5),
-    )
+        "dup": Series(np.arange(5).astype(np.float64), index=["A", "B", "C", "D", "A"]),
+        "cat": Series(Categorical(["foo", "bar", "baz"])),
+        "dt": Series(date_range("20130101", periods=5)),
+        "dt_tz": Series(date_range("20130101", periods=5, tz="US/Eastern")),
+        "period": Series([Period("2000Q1")] * 5),
+    }

     mixed_dup_df = DataFrame(data)
     mixed_dup_df.columns = list("ABCDA")
-    frame = dict(
-        float=DataFrame({"A": series["float"], "B": series["float"] + 1}),
-        int=DataFrame({"A": series["int"], "B": series["int"] + 1}),
-        mixed=DataFrame({k: data[k] for k in ["A", "B", "C", "D"]}),
-        mi=DataFrame(
+    frame = {
+        "float": DataFrame({"A": series["float"], "B": series["float"] + 1}),
+        "int": DataFrame({"A": series["int"], "B": series["int"] + 1}),
+        "mixed": DataFrame({k: data[k] for k in ["A", "B", "C", "D"]}),
+        "mi": DataFrame(
             {"A": np.arange(5).astype(np.float64), "B": np.arange(5).astype(np.int64)},
             index=MultiIndex.from_tuples(
                 tuple(
@@ -214,45 +214,45 @@ def create_data():
                 names=["first", "second"],
             ),
         ),
-        dup=DataFrame(
+        "dup": DataFrame(
             np.arange(15).reshape(5, 3).astype(np.float64), columns=["A", "B", "A"]
         ),
-        cat_onecol=DataFrame({"A": Categorical(["foo", "bar"])}),
-        cat_and_float=DataFrame(
+        "cat_onecol": DataFrame({"A": Categorical(["foo", "bar"])}),
+        "cat_and_float": DataFrame(
             {
                 "A": Categorical(["foo", "bar", "baz"]),
                 "B": np.arange(3).astype(np.int64),
             }
         ),
-        mixed_dup=mixed_dup_df,
-        dt_mixed_tzs=DataFrame(
+        "mixed_dup": mixed_dup_df,
+        "dt_mixed_tzs": DataFrame(
             {
                 "A": Timestamp("20130102", tz="US/Eastern"),
                 "B": Timestamp("20130603", tz="CET"),
             },
             index=range(5),
         ),
-        dt_mixed2_tzs=DataFrame(
+        "dt_mixed2_tzs": DataFrame(
             {
                 "A": Timestamp("20130102", tz="US/Eastern"),
                 "B": Timestamp("20130603", tz="CET"),
                 "C": Timestamp("20130603", tz="UTC"),
             },
             index=range(5),
         ),
-    )
+    }

-    cat = dict(
-        int8=Categorical(list("abcdefg")),
-        int16=Categorical(np.arange(1000)),
-        int32=Categorical(np.arange(10000)),
-    )
+    cat = {
+        "int8": Categorical(list("abcdefg")),
+        "int16": Categorical(np.arange(1000)),
+        "int32": Categorical(np.arange(10000)),
+    }

-    timestamp = dict(
-        normal=Timestamp("2011-01-01"),
-        nat=NaT,
-        tz=Timestamp("2011-01-01", tz="US/Eastern"),
-    )
+    timestamp = {
+        "normal": Timestamp("2011-01-01"),
+        "nat": NaT,
+        "tz": Timestamp("2011-01-01", tz="US/Eastern"),
+    }

     timestamp["freq"] = Timestamp("2011-01-01", freq="D")
     timestamp["both"] = Timestamp("2011-01-01", tz="Asia/Tokyo", freq="M")
@@ -282,18 +282,18 @@ def create_data():
         "Minute": Minute(1),
     }

-    return dict(
-        series=series,
-        frame=frame,
-        index=index,
-        scalars=scalars,
-        mi=mi,
-        sp_series=dict(float=_create_sp_series(), ts=_create_sp_tsseries()),
-        sp_frame=dict(float=_create_sp_frame()),
-        cat=cat,
-        timestamp=timestamp,
-        offsets=off,
-    )
+    return {
+        "series": series,
+        "frame": frame,
+        "index": index,
+        "scalars": scalars,
+        "mi": mi,
+        "sp_series": {"float": _create_sp_series(), "ts": _create_sp_tsseries()},
+        "sp_frame": {"float": _create_sp_frame()},
+        "cat": cat,
+        "timestamp": timestamp,
+        "offsets": off,
+    }


 def create_pickle_data():
2 changes: 1 addition & 1 deletion pandas/tests/io/parser/test_comment.py
@@ -26,7 +26,7 @@ def test_comment(all_parsers, na_values):


 @pytest.mark.parametrize(
-    "read_kwargs", [dict(), dict(lineterminator="*"), dict(delim_whitespace=True)]
+    "read_kwargs", [{}, {"lineterminator": "*"}, {"delim_whitespace": True}]
 )
 def test_line_comment(all_parsers, read_kwargs):
     parser = all_parsers
30 changes: 15 additions & 15 deletions pandas/tests/io/parser/test_dialect.py
@@ -17,14 +17,14 @@
 @pytest.fixture
 def custom_dialect():
     dialect_name = "weird"
-    dialect_kwargs = dict(
-        doublequote=False,
-        escapechar="~",
-        delimiter=":",
-        skipinitialspace=False,
-        quotechar="~",
-        quoting=3,
-    )
+    dialect_kwargs = {
+        "doublequote": False,
+        "escapechar": "~",
+        "delimiter": ":",
+        "skipinitialspace": False,
+        "quotechar": "~",
+        "quoting": 3,
+    }
     return dialect_name, dialect_kwargs


@@ -91,7 +91,7 @@ def test_dialect_conflict_except_delimiter(all_parsers, custom_dialect, arg, val
     data = "a:b\n1:2"

     warning_klass = None
-    kwds = dict()
+    kwds = {}

     # arg=None tests when we pass in the dialect without any other arguments.
     if arg is not None:
@@ -114,12 +114,12 @@ def test_dialect_conflict_except_delimiter(all_parsers, custom_dialect, arg, val
 @pytest.mark.parametrize(
     "kwargs,warning_klass",
     [
-        (dict(sep=","), None),  # sep is default --> sep_override=True
-        (dict(sep="."), ParserWarning),  # sep isn't default --> sep_override=False
-        (dict(delimiter=":"), None),  # No conflict
-        (dict(delimiter=None), None),  # Default arguments --> sep_override=True
-        (dict(delimiter=","), ParserWarning),  # Conflict
-        (dict(delimiter="."), ParserWarning),  # Conflict
+        ({"sep": ","}, None),  # sep is default --> sep_override=True
+        ({"sep": "."}, ParserWarning),  # sep isn't default --> sep_override=False
+        ({"delimiter": ":"}, None),  # No conflict
+        ({"delimiter": None}, None),  # Default arguments --> sep_override=True
+        ({"delimiter": ","}, ParserWarning),  # Conflict
+        ({"delimiter": "."}, ParserWarning),  # Conflict
     ],
     ids=[
         "sep-override-true",
12 changes: 6 additions & 6 deletions pandas/tests/io/parser/test_encoding.py
@@ -47,7 +47,7 @@ def test_utf16_bom_skiprows(all_parsers, sep, encoding):
         ",", sep
     )
     path = f"__{tm.rands(10)}__.csv"
-    kwargs = dict(sep=sep, skiprows=2)
+    kwargs = {"sep": sep, "skiprows": 2}
     utf8 = "utf-8"

     with tm.ensure_clean(path) as path:
@@ -91,17 +91,17 @@ def test_unicode_encoding(all_parsers, csv_dir_path):
     "data,kwargs,expected",
     [
         # Basic test
-        ("a\n1", dict(), DataFrame({"a": [1]})),
+        ("a\n1", {}, DataFrame({"a": [1]})),
         # "Regular" quoting
-        ('"a"\n1', dict(quotechar='"'), DataFrame({"a": [1]})),
+        ('"a"\n1', {"quotechar": '"'}, DataFrame({"a": [1]})),
         # Test in a data row instead of header
-        ("b\n1", dict(names=["a"]), DataFrame({"a": ["b", "1"]})),
+        ("b\n1", {"names": ["a"]}, DataFrame({"a": ["b", "1"]})),
         # Test in empty data row with skipping
-        ("\n1", dict(names=["a"], skip_blank_lines=True), DataFrame({"a": [1]})),
+        ("\n1", {"names": ["a"], "skip_blank_lines": True}, DataFrame({"a": [1]})),
         # Test in empty data row without skipping
         (
             "\n1",
-            dict(names=["a"], skip_blank_lines=False),
+            {"names": ["a"], "skip_blank_lines": False},
             DataFrame({"a": [np.nan, 1]}),
         ),
     ],
