Upgrades the version of black used in CI #131

Merged · 7 commits · May 29, 2024
2 changes: 1 addition & 1 deletion .github/workflows/unit-tests.yaml
@@ -42,7 +42,7 @@ jobs:
run: |
pip install flake8-pytest-importorskip
pip install --upgrade click==8.0.4
-pip install black==21.12b0
+pip install black==24.4.2
pip install flake8==4.0.1

- name: Lint and Format Check with Flake8 and Black
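The lint step can be reproduced locally before pushing. Below is a minimal sketch of such a helper (the script name check_format.py is hypothetical; the pins are the ones from the workflow step above):

# check_format.py -- hypothetical helper mirroring the CI lint step.
# Installs the same pinned tools, then runs black in check-only mode.
import subprocess
import sys

PINS = ["click==8.0.4", "black==24.4.2", "flake8==4.0.1"]

subprocess.run([sys.executable, "-m", "pip", "install", *PINS], check=True)
# "black --check" lists files that would be reformatted, without changing them.
result = subprocess.run([sys.executable, "-m", "black", "--check", "."])
sys.exit(result.returncode)

As an aside, the click==8.0.4 pin was originally needed because black 21.12b0 crashed under click 8.1; black releases from 22.3.0 onward no longer require it, so that pin is likely vestigial after this upgrade.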
5 changes: 0 additions & 5 deletions hatchet/query/compound.py
@@ -14,7 +14,6 @@


class CompoundQuery(object):
-
"""Base class for all types of compound queries."""

def __init__(self, *queries):
@@ -48,7 +47,6 @@ def _apply_op_to_results(self, subquery_results):


class ConjunctionQuery(CompoundQuery):
-
"""A compound query that combines the results of its subqueries
using set conjunction.
"""
@@ -83,7 +81,6 @@ def _apply_op_to_results(self, subquery_results, graph):


class DisjunctionQuery(CompoundQuery):
-
"""A compound query that combines the results of its subqueries
using set disjunction.
"""
@@ -118,7 +115,6 @@ def _apply_op_to_results(self, subquery_results, graph):


class ExclusiveDisjunctionQuery(CompoundQuery):
-
"""A compound query that combines the results of its subqueries
using exclusive set disjunction.
"""
@@ -153,7 +149,6 @@ def _apply_op_to_results(self, subquery_results, graph):


class NegationQuery(CompoundQuery):
-
"""A compound query that inverts/negates the result of
its single subquery.
"""
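The deletions in this file, and the one in hatchet/query/engine.py below, are all the same mechanical change: black 24.x's stable style removes a blank line sitting between a class statement and its docstring, which black 21.12b0 tolerated. A minimal sketch with a hypothetical class:

# Hypothetical example, not from this repo: under black 24.4.2 the
# docstring must sit directly below the "class" line.
class ExampleQuery(object):
    """Base class standing in for CompoundQuery and friends."""

    def __init__(self, *queries):
        self.subqueries = list(queries)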
1 change: 0 additions & 1 deletion hatchet/query/engine.py
@@ -15,7 +15,6 @@


class QueryEngine:
-
"""Class for applying queries to GraphFrames."""

def __init__(self):
160 changes: 100 additions & 60 deletions hatchet/query/string_dialect.py
@@ -482,9 +482,11 @@ def _parse_str_eq(self, obj):
None,
obj.name,
'df_row[{}] == "{}"'.format(
-str(tuple(obj.prop.ids))
-if len(obj.prop.ids) > 1
-else "'{}'".format(obj.prop.ids[0]),
+(
+    str(tuple(obj.prop.ids))
+    if len(obj.prop.ids) > 1
+    else "'{}'".format(obj.prop.ids[0])
+),
obj.val,
),
"isinstance(df_row[{}], str)".format(
@@ -500,9 +502,11 @@ def _parse_str_eq_multi_idx(self, obj):
obj.name,
self._add_aggregation_call_to_multi_idx_predicate(
'df_row[{}].apply(lambda elem: elem == "{}")'.format(
-str(tuple(obj.prop.ids))
-if len(obj.prop.ids) > 1
-else "'{}'".format(obj.prop.ids[0]),
+(
+    str(tuple(obj.prop.ids))
+    if len(obj.prop.ids) > 1
+    else "'{}'".format(obj.prop.ids[0])
+),
obj.val,
)
),
@@ -519,9 +523,11 @@ def _parse_str_starts_with(self, obj):
None,
obj.name,
'df_row[{}].startswith("{}")'.format(
-str(tuple(obj.prop.ids))
-if len(obj.prop.ids) > 1
-else "'{}'".format(obj.prop.ids[0]),
+(
+    str(tuple(obj.prop.ids))
+    if len(obj.prop.ids) > 1
+    else "'{}'".format(obj.prop.ids[0])
+),
obj.val,
),
"isinstance(df_row[{}], str)".format(
@@ -537,9 +543,11 @@ def _parse_str_starts_with_multi_idx(self, obj):
obj.name,
self._add_aggregation_call_to_multi_idx_predicate(
'df_row[{}].apply(lambda elem: elem.startswith("{}"))'.format(
-str(tuple(obj.prop.ids))
-if len(obj.prop.ids) > 1
-else "'{}'".format(obj.prop.ids[0]),
+(
+    str(tuple(obj.prop.ids))
+    if len(obj.prop.ids) > 1
+    else "'{}'".format(obj.prop.ids[0])
+),
obj.val,
)
),
@@ -556,9 +564,11 @@ def _parse_str_ends_with(self, obj):
None,
obj.name,
'df_row[{}].endswith("{}")'.format(
-str(tuple(obj.prop.ids))
-if len(obj.prop.ids) > 1
-else "'{}'".format(obj.prop.ids[0]),
+(
+    str(tuple(obj.prop.ids))
+    if len(obj.prop.ids) > 1
+    else "'{}'".format(obj.prop.ids[0])
+),
obj.val,
),
"isinstance(df_row[{}], str)".format(
@@ -574,9 +584,11 @@ def _parse_str_ends_with_multi_idx(self, obj):
obj.name,
self._add_aggregation_call_to_multi_idx_predicate(
'df_row[{}].apply(lambda elem: elem.endswith("{}"))'.format(
-str(tuple(obj.prop.ids))
-if len(obj.prop.ids) > 1
-else "'{}'".format(obj.prop.ids[0]),
+(
+    str(tuple(obj.prop.ids))
+    if len(obj.prop.ids) > 1
+    else "'{}'".format(obj.prop.ids[0])
+),
obj.val,
)
),
@@ -594,9 +606,11 @@ def _parse_str_contains(self, obj):
obj.name,
'"{}" in df_row[{}]'.format(
obj.val,
-str(tuple(obj.prop.ids))
-if len(obj.prop.ids) > 1
-else "'{}'".format(obj.prop.ids[0]),
+(
+    str(tuple(obj.prop.ids))
+    if len(obj.prop.ids) > 1
+    else "'{}'".format(obj.prop.ids[0])
+),
),
"isinstance(df_row[{}], str)".format(
str(tuple(obj.prop.ids))
@@ -611,9 +625,11 @@ def _parse_str_contains_multi_idx(self, obj):
obj.name,
self._add_aggregation_call_to_multi_idx_predicate(
'df_row[{}].apply(lambda elem: "{}" in elem)'.format(
-str(tuple(obj.prop.ids))
-if len(obj.prop.ids) > 1
-else "'{}'".format(obj.prop.ids[0]),
+(
+    str(tuple(obj.prop.ids))
+    if len(obj.prop.ids) > 1
+    else "'{}'".format(obj.prop.ids[0])
+),
obj.val,
)
),
@@ -631,9 +647,11 @@ def _parse_str_match(self, obj):
obj.name,
're.match("{}", df_row[{}]) is not None'.format(
obj.val,
-str(tuple(obj.prop.ids))
-if len(obj.prop.ids) > 1
-else "'{}'".format(obj.prop.ids[0]),
+(
+    str(tuple(obj.prop.ids))
+    if len(obj.prop.ids) > 1
+    else "'{}'".format(obj.prop.ids[0])
+),
),
"isinstance(df_row[{}], str)".format(
str(tuple(obj.prop.ids))
@@ -648,9 +666,11 @@ def _parse_str_match_multi_idx(self, obj):
obj.name,
self._add_aggregation_call_to_multi_idx_predicate(
'df_row[{}].apply(lambda elem: re.match("{}", elem) is not None)'.format(
-str(tuple(obj.prop.ids))
-if len(obj.prop.ids) > 1
-else "'{}'".format(obj.prop.ids[0]),
+(
+    str(tuple(obj.prop.ids))
+    if len(obj.prop.ids) > 1
+    else "'{}'".format(obj.prop.ids[0])
+),
obj.val,
)
),
@@ -748,9 +768,11 @@ def _parse_num_eq(self, obj):
None,
obj.name,
"df_row[{}] == {}".format(
-str(tuple(obj.prop.ids))
-if len(obj.prop.ids) > 1
-else "'{}'".format(obj.prop.ids[0]),
+(
+    str(tuple(obj.prop.ids))
+    if len(obj.prop.ids) > 1
+    else "'{}'".format(obj.prop.ids[0])
+),
obj.val,
),
"isinstance(df_row[{}], Real)".format(
@@ -825,9 +847,11 @@ def _parse_num_eq_multi_idx(self, obj):
obj.name,
self._add_aggregation_call_to_multi_idx_predicate(
"df_row[{}].apply(lambda elem: elem == {})".format(
-str(tuple(obj.prop.ids))
-if len(obj.prop.ids) > 1
-else "'{}'".format(obj.prop.ids[0]),
+(
+    str(tuple(obj.prop.ids))
+    if len(obj.prop.ids) > 1
+    else "'{}'".format(obj.prop.ids[0])
+),
obj.val,
)
),
@@ -894,9 +918,11 @@ def _parse_num_lt(self, obj):
None,
obj.name,
"df_row[{}] < {}".format(
-str(tuple(obj.prop.ids))
-if len(obj.prop.ids) > 1
-else "'{}'".format(obj.prop.ids[0]),
+(
+    str(tuple(obj.prop.ids))
+    if len(obj.prop.ids) > 1
+    else "'{}'".format(obj.prop.ids[0])
+),
obj.val,
),
"isinstance(df_row[{}], Real)".format(
@@ -964,9 +990,11 @@ def _parse_num_lt_multi_idx(self, obj):
obj.name,
self._add_aggregation_call_to_multi_idx_predicate(
"df_row[{}].apply(lambda elem: elem < {})".format(
-str(tuple(obj.prop.ids))
-if len(obj.prop.ids) > 1
-else "'{}'".format(obj.prop.ids[0]),
+(
+    str(tuple(obj.prop.ids))
+    if len(obj.prop.ids) > 1
+    else "'{}'".format(obj.prop.ids[0])
+),
obj.val,
)
),
@@ -1033,9 +1061,11 @@ def _parse_num_gt(self, obj):
None,
obj.name,
"df_row[{}] > {}".format(
-str(tuple(obj.prop.ids))
-if len(obj.prop.ids) > 1
-else "'{}'".format(obj.prop.ids[0]),
+(
+    str(tuple(obj.prop.ids))
+    if len(obj.prop.ids) > 1
+    else "'{}'".format(obj.prop.ids[0])
+),
obj.val,
),
"isinstance(df_row[{}], Real)".format(
@@ -1103,9 +1133,11 @@ def _parse_num_gt_multi_idx(self, obj):
obj.name,
self._add_aggregation_call_to_multi_idx_predicate(
"df_row[{}].apply(lambda elem: elem > {})".format(
-str(tuple(obj.prop.ids))
-if len(obj.prop.ids) > 1
-else "'{}'".format(obj.prop.ids[0]),
+(
+    str(tuple(obj.prop.ids))
+    if len(obj.prop.ids) > 1
+    else "'{}'".format(obj.prop.ids[0])
+),
obj.val,
)
),
@@ -1172,9 +1204,11 @@ def _parse_num_lte(self, obj):
None,
obj.name,
"df_row[{}] <= {}".format(
-str(tuple(obj.prop.ids))
-if len(obj.prop.ids) > 1
-else "'{}'".format(obj.prop.ids[0]),
+(
+    str(tuple(obj.prop.ids))
+    if len(obj.prop.ids) > 1
+    else "'{}'".format(obj.prop.ids[0])
+),
obj.val,
),
"isinstance(df_row[{}], Real)".format(
@@ -1242,9 +1276,11 @@ def _parse_num_lte_multi_idx(self, obj):
obj.name,
self._add_aggregation_call_to_multi_idx_predicate(
"df_row[{}].apply(lambda elem: elem <= {})".format(
-str(tuple(obj.prop.ids))
-if len(obj.prop.ids) > 1
-else "'{}'".format(obj.prop.ids[0]),
+(
+    str(tuple(obj.prop.ids))
+    if len(obj.prop.ids) > 1
+    else "'{}'".format(obj.prop.ids[0])
+),
obj.val,
)
),
@@ -1311,9 +1347,11 @@ def _parse_num_gte(self, obj):
None,
obj.name,
"df_row[{}] >= {}".format(
-str(tuple(obj.prop.ids))
-if len(obj.prop.ids) > 1
-else "'{}'".format(obj.prop.ids[0]),
+(
+    str(tuple(obj.prop.ids))
+    if len(obj.prop.ids) > 1
+    else "'{}'".format(obj.prop.ids[0])
+),
obj.val,
),
"isinstance(df_row[{}], Real)".format(
@@ -1381,9 +1419,11 @@ def _parse_num_gte_multi_idx(self, obj):
obj.name,
self._add_aggregation_call_to_multi_idx_predicate(
"df_row[{}].apply(lambda elem: elem >= {})".format(
-str(tuple(obj.prop.ids))
-if len(obj.prop.ids) > 1
-else "'{}'".format(obj.prop.ids[0]),
+(
+    str(tuple(obj.prop.ids))
+    if len(obj.prop.ids) > 1
+    else "'{}'".format(obj.prop.ids[0])
+),
obj.val,
)
),
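Every hunk in this file is the same recurring change: black 24.x parenthesizes a conditional (ternary) expression that spans multiple lines, where black 21.12b0 left it bare inside the call. A runnable sketch of the before/after (names are illustrative):

ids = ["name", "rank"]  # stand-in for obj.prop.ids

# black 21.12b0 layout:
# predicate = "df_row[{}] == 0".format(
#     str(tuple(ids))
#     if len(ids) > 1
#     else "'{}'".format(ids[0]),
# )

# black 24.4.2 layout: the multi-line ternary gains its own parentheses.
predicate = "df_row[{}] == 0".format(
    (
        str(tuple(ids))
        if len(ids) > 1
        else "'{}'".format(ids[0])
    ),
)
print(predicate)  # df_row[('name', 'rank')] == 0

The added parentheses are purely syntactic; they make the extent of the ternary explicit without changing the generated predicate strings.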
4 changes: 2 additions & 2 deletions hatchet/readers/caliper_native_reader.py
@@ -92,7 +92,8 @@ def _create_metric_df(self, metrics):

def _reset_metrics(self, metrics):
"""Since the initial functions (i.e. main) are only called once, this keeps a small subset
-of the timeseries data and resets the rest so future iterations will be filled with nans"""
+of the timeseries data and resets the rest so future iterations will be filled with nans
+"""
new_mets = []
cols_to_keep = [
"nid",
@@ -106,7 +107,6 @@ def read_metrics(self, ctx="path"):
return new_mets

def read_metrics(self, ctx="path"):
-
"""append each metrics table to a list and return the list, split on timeseries_level if exists"""
metric_dfs = []
all_metrics = []
8 changes: 5 additions & 3 deletions hatchet/readers/caliper_reader.py
@@ -247,9 +247,11 @@ def read(self):
# we will only reach here if path is the "secondary"
# hierarchy in the data
self.df_json_data["path"] = self.df_json_data["path"].apply(
-lambda x: None
-if (math.isnan(x))
-else self.json_nodes[int(x)]["label"]
+lambda x: (
+    None
+    if (math.isnan(x))
+    else self.json_nodes[int(x)]["label"]
+)
)
else:
self.df_json_data[self.json_cols[idx]] = self.df_json_data[
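The hunk above is the same parenthesization rule applied to a lambda whose body is a multi-line conditional. A small runnable sketch (hypothetical data; pandas is already a dependency of this reader):

import math

import pandas as pd

json_nodes = [{"label": "main"}, {"label": "solver"}]  # stand-in node table
path_col = pd.Series([0.0, 1.0, float("nan")])

# black 24.x wraps the lambda's ternary body in its own parentheses.
labels = path_col.apply(
    lambda x: (None if math.isnan(x) else json_nodes[int(x)]["label"])
)
print(list(labels))  # ['main', 'solver', None]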
2 changes: 1 addition & 1 deletion hatchet/readers/gprof_dot_reader.py
@@ -91,7 +91,7 @@ def create_graph(self):

# add all nodes with no parents to the list of roots
list_roots = []
-for (key, val) in self.name_to_hnode.items():
+for key, val in self.name_to_hnode.items():
if not val.parents:
list_roots.append(val)

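The final hunk drops the parentheses around the for-loop's tuple target. Whether applied by black or by hand, the parentheses are redundant and the loop is unchanged at runtime, as this sketch with a hypothetical mapping shows:

# name_to_parents stands in for self.name_to_hnode; an empty list marks a root.
name_to_parents = {"main": [], "solver": ["main"]}

list_roots = []
for key, val in name_to_parents.items():  # formerly: for (key, val) in ...
    if not val:
        list_roots.append(key)
print(list_roots)  # ['main']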