[refactor] Ruff fixes #1327

Merged · 13 commits · May 16, 2023
11 changes: 7 additions & 4 deletions docs/source/conf.py
@@ -15,13 +15,14 @@
import os
import sys

import sphinx_fontawesome
import sphinx_fontawesome # noqa: F401
from sphinx.ext.autodoc import between

from typing import Any, Dict

# sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath("../.."))

from typing import Any, Dict

# -- Project information -----------------------------------------------------

@@ -71,12 +72,14 @@
html_favicon = "images/np_favicon.png"
html_logo = "images/np_highres_docs.svg"
# html_logo = "images/logo.png"
font_stack = "-apple-system,'system-ui','Segoe UI',Helvetica,Arial,sans-serif,'Apple Color Emoji','Segoe UI Emoji'"
font_stack_mono = "'SFMono-Regular',Menlo,Consolas,Monaco,Liberation Mono,Lucida Console,monospace"
html_theme_options: Dict[str, Any] = {
"sidebar_hide_name": True,
"navigation_with_keys": True,
"light_css_variables": {
"font-stack": "-apple-system, 'system-ui', 'Segoe UI', Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji'",
"font-stack--monospace": "'SFMono-Regular',Menlo,Consolas,Monaco,Liberation Mono,Lucida Console,monospace",
"font-stack": font_stack,
"font-stack--monospace": font_stack_mono,
},
}

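The conf.py hunk silences Ruff's F401 ("imported but unused") check: `sphinx_fontawesome` is imported only for its import-time side effects, so the name is never referenced afterwards. A minimal sketch of the pattern (the comment text is illustrative):

```python
# Imported for side effects only: the module registers itself with Sphinx at
# import time, so the name is never used afterwards. Without the marker,
# Ruff (like flake8) reports: F401 'sphinx_fontawesome' imported but unused.
import sphinx_fontawesome  # noqa: F401
```

Factoring the two font stacks into `font_stack` and `font_stack_mono` likewise keeps the `html_theme_options` entries under the line-length limit Ruff enforces.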
3 changes: 2 additions & 1 deletion neuralprophet/components/future_regressors/linear.py
@@ -37,7 +37,8 @@ def scalar_features_effects(self, features, params, indices=None):
Parameters
----------
features : torch.Tensor, float
Features (either additive or multiplicative) related to event component dims (batch, n_forecasts, n_features)
Features (either additive or multiplicative) related to event component dims (batch, n_forecasts,
n_features)
params : nn.Parameter
Params (either additive or multiplicative) related to events
indices : list of int
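The rewrapped docstring spells out the shape contract: features arrive as (batch, n_forecasts, n_features) and are combined with per-feature parameters. A small sketch of what such a broadcasted multiply-and-reduce can look like (only the feature shape comes from the docstring; the parameter shape and the reduction itself are assumptions):

```python
import torch

batch, n_forecasts, n_features, n_quantiles = 32, 7, 4, 3

features = torch.randn(batch, n_forecasts, n_features)  # per the docstring
params = torch.randn(n_quantiles, n_features)           # hypothetical: one weight per feature/quantile

# Broadcast to (batch, n_forecasts, n_quantiles, n_features), then sum out
# the feature axis to get one effect per forecast step and quantile.
effects = (features.unsqueeze(2) * params.unsqueeze(0).unsqueeze(0)).sum(dim=-1)
print(effects.shape)  # torch.Size([32, 7, 3])
```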
3 changes: 2 additions & 1 deletion neuralprophet/components/seasonality/fourier.py
@@ -23,7 +23,8 @@ def __init__(self, config, id_list, quantiles, num_seasonalities_modelled, n_forecasts, device):
# Seasonality parameters for global or local modelling
self.season_params = nn.ParameterDict(
{
# dimensions - [no. of quantiles, num_seasonalities_modelled, no. of fourier terms for each seasonality]
# dimensions -
# [no. of quantiles, num_seasonalities_modelled, no. of fourier terms for each seasonality]
name: init_parameter(dims=[len(self.quantiles)] + [self.num_seasonalities_modelled] + [dim])
for name, dim in self.season_dims.items()
}
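The comment being rewrapped documents the seasonality parameter tensor as [quantiles, num_seasonalities_modelled, fourier terms per seasonality]. For context, the Fourier features those parameters multiply are typically built like this (a Prophet-style sketch; the helper below is illustrative, not the library's code):

```python
import numpy as np

def fourier_series(t, period=365.25, order=3):
    """Fourier features for timestamps t (in days): sin/cos pairs up to `order`.

    Returns shape (len(t), 2 * order) -- the `dim` axis that the seasonality
    parameters above are matched against.
    """
    t = np.asarray(t, dtype=float)
    angles = 2.0 * np.pi * np.outer(t, np.arange(1, order + 1)) / period  # (len(t), order)
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=1)       # (len(t), 2 * order)

features = fourier_series(np.arange(10), period=7.0, order=2)
print(features.shape)  # (10, 4)
```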
35 changes: 19 additions & 16 deletions neuralprophet/components/trend/piecewise_linear.py
@@ -111,9 +111,11 @@ def add_regularization(self):
pass

def compute_k_t(self, current_segment, past_next_changepoint, meta_name_tensor_one_hot):
"""For segmentwise, k_t is the model parameter representing the trend slope(actually, trend slope-k_0) in the current_segment at time t (for each sample of the batch).
"""For segmentwise, k_t is the model parameter representing the trend slope(actually, trend slope-k_0) in the
current_segment at time t (for each sample of the batch).

For not segmentwise, k_t is the model parameter representing the difference between trend slope in the current_segment at time t and the trend slope in the previous segment (for each sample of the batch).
For not segmentwise, k_t is the model parameter representing the difference between trend slope in the
current_segment at time t and the trend slope in the previous segment (for each sample of the batch).

Parameters
----------
@@ -137,12 +139,9 @@ def compute_k_t(self, current_segment, past_next_changepoint, meta_name_tensor_one_hot):
pass

def compute_m_t(self, current_segment, past_next_changepoint, meta_name_tensor_one_hot):
"""m_t represents the value at the origin(t=0) that we would need to have so that if we use (k_t + k_0) as slope,

we reach the same value at time = changepoint_start_of_segment_i

as we would reach by following the segmented slope (having in each segment the slope trend_deltas(i) + k_0)

"""m_t represents the value at the origin(t=0) that we would need to have so that if we use (k_t + k_0) as
slope, we reach the same value at time = changepoint_start_of_segment_i as we would reach by following the
segmented slope (having in each segment the slope trend_deltas(i) + k_0)

Parameters
----------
@@ -206,16 +205,17 @@ def __init__(self, config, id_list, quantiles, num_trends_modelled, n_forecasts,

def compute_k_t(self, current_segment, past_next_changepoint, meta_name_tensor_one_hot=None):
"""This method overrides the method from the PiecewiseLinear class."""
# For segmentwise k_t, is the model parameter representing the trend slope(actually, trend slope-k_0) in the current_segment at time t (for each sample of the batch).
# For segmentwise k_t, is the model parameter representing the trend slope(actually, trend slope-k_0) in the
# current_segment at time t (for each sample of the batch).
# k_t = k_t(current_segment).
# dimensions - batch_size, n_forecasts, quantiles_size
k_t = torch.sum(
current_segment.unsqueeze(dim=2) * self.trend_deltas.permute(1, 0, 2).unsqueeze(1),
dim=-1,
)

# For not segmentwise k_t is the model parameter representing the difference between trend slope in the current_segment at time t
# and the trend slope in the previous segment (for each sample of the batch).
# For not segmentwise k_t is the model parameter representing the difference between trend slope in the
# current_segment at time t and the trend slope in the previous segment (for each sample of the batch).
if not self.segmentwise_trend:
# k_t = k_t(current_segment, previous_segment)
# dimensions - batch_size, n_forecasts, quantiles_size
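The computation above reduces a one-hot segment encoding against per-segment slope deltas. A small sketch of why the multiply-and-sum is just a differentiable segment lookup (shapes follow the inline comments; the concrete numbers are illustrative):

```python
import torch

batch, n_forecasts, n_segments, n_quantiles = 2, 3, 4, 3

# One-hot encoding of the trend segment each forecast step falls into;
# here every step sits in segment 1.
current_segment = torch.zeros(batch, n_forecasts, n_segments)
current_segment[:, :, 1] = 1.0

# One slope delta per (quantile, segment); the middle dim is num_trends_modelled.
trend_deltas = torch.randn(n_quantiles, 1, n_segments)

k_t = torch.sum(
    current_segment.unsqueeze(dim=2) * trend_deltas.permute(1, 0, 2).unsqueeze(1),
    dim=-1,
)
print(k_t.shape)  # torch.Size([2, 3, 3]) -> (batch, n_forecasts, quantiles)
print(torch.allclose(k_t[0, 0], trend_deltas[:, 0, 1]))  # True: picked segment 1's deltas
```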
@@ -252,7 +252,8 @@ def compute_m_t(self, current_segment, past_next_changepoint, meta_name_tensor_one_hot):
if not self.segmentwise_trend:
m_t = m_t.detach()
else:
# For discontinuous, trend_m is a parameter to optimize, as it is not defined just by trend_deltas & trend_k0
# For discontinuous, trend_m is a parameter to optimize, as it is not defined just
# by trend_deltas & trend_k0
# m_t = m_t(current_segment)
# dimensions - batch_size, n_forecasts, quantiles
m_t = torch.sum(current_segment.unsqueeze(dim=2) * self.trend_m.permute(1, 0, 2).unsqueeze(dim=0), dim=-1)
@@ -279,7 +280,8 @@ def __init__(self, config, id_list, quantiles, num_trends_modelled, n_forecasts,

def compute_k_t(self, current_segment, past_next_changepoint, meta_name_tensor_one_hot):
"""This method overrides the method from the PiecewiseLinear class."""
# For segmentwise k_t, is the model parameter representing the trend slope(actually, trend slope-k_0) in the current_segment at time t (for each sample of the batch).
# For segmentwise k_t, is the model parameter representing the trend slope(actually, trend slope-k_0) in the
# current_segment at time t (for each sample of the batch).
# k_t = k_t(current_segment, sample metadata)
# dimensions - quantiles, batch_size, segments (+ 1)
trend_deltas_by_sample = torch.sum(
@@ -288,8 +290,8 @@ def compute_m_t(self, current_segment, past_next_changepoint, meta_name_tensor_one_hot):
# dimensions - batch_size, n_forecasts, quantiles_size
k_t = torch.sum(current_segment.unsqueeze(dim=2) * trend_deltas_by_sample.permute(1, 0, 2).unsqueeze(1), dim=-1)

# For not segmentwise k_t is the model parameter representing the difference between trend slope in the current_segment at time t
# and the trend slope in the previous segment (for each sample of the batch).
# For not segmentwise k_t is the model parameter representing the difference between trend slope in the
# current_segment at time t and the trend slope in the previous segment (for each sample of the batch).
if not self.segmentwise_trend:
# k_t = k_t(current_segment, previous_segment, sample metadata)
previous_deltas_t = torch.sum(
@@ -335,7 +337,8 @@ def compute_m_t(self, current_segment, past_next_changepoint, meta_name_tensor_one_hot):
if not self.segmentwise_trend:
m_t = m_t.detach()
else:
# For discontinuous, trend_m is a parameter to optimize, as it is not defined just by trend_deltas & trend_k0
# For discontinuous, trend_m is a parameter to optimize, as it is not defined just
# by trend_deltas & trend_k0
# m_t = m_t(current_segment, sample metadata)
# dimensions - quantiles, batch_size, segments
m_t_0 = torch.sum(
13 changes: 8 additions & 5 deletions neuralprophet/configure.py
@@ -65,12 +65,14 @@ def get_data_params(self, df_name):
data_params = self.local_data_params[df_name]
elif self.unknown_data_normalization:
log.debug(
f"Dataset name {df_name!r} is not present in valid data_params but unknown_data_normalization is True. Using global_data_params"
f"Dataset name {df_name!r} is not present in valid data_params but unknown_data_normalization is \
True. Using global_data_params"
)
data_params = self.global_data_params
else:
raise ValueError(
f"Dataset name {df_name!r} missing from training data params. Set unknown_data_normalization to use global (average) normalization parameters."
f"Dataset name {df_name!r} missing from training data params. Set unknown_data_normalization to \
use global (average) normalization parameters."
)
return data_params
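The fallback chain in get_data_params (local params, then global params when unknown_data_normalization is set, else an error) is easier to see outside the diff. A self-contained re-creation of the lookup order (hypothetical class; only the method's logic mirrors the hunk above):

```python
class NormalizationConfig:
    """Hypothetical container mirroring the lookup order in get_data_params."""

    def __init__(self, local_data_params, global_data_params, unknown_data_normalization=False):
        self.local_data_params = local_data_params
        self.global_data_params = global_data_params
        self.unknown_data_normalization = unknown_data_normalization

    def get_data_params(self, df_name):
        if df_name in self.local_data_params:   # known dataset: use its own params
            return self.local_data_params[df_name]
        if self.unknown_data_normalization:     # unknown but allowed: fall back to global
            return self.global_data_params
        raise ValueError(
            f"Dataset name {df_name!r} missing from training data params. "
            "Set unknown_data_normalization to use global (average) normalization parameters."
        )
```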

@@ -177,8 +179,8 @@ def set_auto_batch_epoch(

def set_optimizer(self):
"""
Set the optimizer and optimizer args. If optimizer is a string, then it will be converted to the corresponding torch optimizer.
The optimizer is not initialized yet as this is done in configure_optimizers in TimeNet.
Set the optimizer and optimizer args. If optimizer is a string, then it will be converted to the corresponding
torch optimizer. The optimizer is not initialized yet as this is done in configure_optimizers in TimeNet.
"""
self.optimizer, self.optimizer_args = utils_torch.create_optimizer_from_config(
self.optimizer, self.optimizer_args
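As the docstring notes, a string optimizer name is resolved to a torch class here but only instantiated later, in configure_optimizers. A sketch of that resolution (the name-to-class table is an assumption, not the exact contents of utils_torch.create_optimizer_from_config):

```python
import torch

def create_optimizer_from_config(optimizer, optimizer_args):
    """Resolve a string such as "AdamW" to a torch.optim class; keep kwargs as-is."""
    if isinstance(optimizer, str):
        name_to_class = {"adamw": torch.optim.AdamW, "sgd": torch.optim.SGD}  # assumed table
        optimizer = name_to_class[optimizer.lower()]
    # Deliberately not instantiated: the model's parameters only exist later,
    # when configure_optimizers runs in TimeNet.
    return optimizer, optimizer_args

opt_cls, opt_args = create_optimizer_from_config("AdamW", {"weight_decay": 1e-3})
print(opt_cls)  # <class 'torch.optim.adamw.AdamW'>
```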
@@ -207,7 +209,8 @@ def set_lr_finder_args(self, dataset_size, num_batches):
num_training = 150 + int(np.log10(100 + dataset_size) * 25)
if num_batches < num_training:
log.warning(
f"Learning rate finder: The number of batches ({num_batches}) is too small than the required number for the learning rate finder ({num_training}). The results might not be optimal."
f"Learning rate finder: The number of batches ({num_batches}) is too small than the required number \
for the learning rate finder ({num_training}). The results might not be optimal."
)
# num_training = num_batches
self.lr_finder_args.update(
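The heuristic above scales the number of learning-rate trials with the log of the dataset size. A quick worked example:

```python
import numpy as np

dataset_size = 10_000
num_training = 150 + int(np.log10(100 + dataset_size) * 25)
print(num_training)  # 250: log10(10100) ≈ 4.004, times 25 -> 100, plus the base 150
```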
20 changes: 13 additions & 7 deletions neuralprophet/data/process.py
@@ -451,7 +451,8 @@ def _handle_missing_data(

Note
----
Any valid frequency for pd.date_range, such as ``5min``, ``D``, ``MS`` or ``auto`` (default) to automatically set frequency.
Any valid frequency for pd.date_range, such as ``5min``, ``D``, ``MS`` or ``auto`` (default) to
automatically set frequency.
n_lags : int
Previous time series steps to include in auto-regression. Aka AR-order
n_forecasts : int
@@ -521,7 +522,8 @@ def _handle_missing_data_single_id(

Note
----
Any valid frequency for pd.date_range, such as ``5min``, ``D``, ``MS`` or ``auto`` (default) to automatically set frequency.
Any valid frequency for pd.date_range, such as ``5min``, ``D``, ``MS`` or ``auto`` (default) to
automatically set frequency.
n_lags : int
Previous time series steps to include in auto-regression. Aka AR-order
n_forecasts : int
@@ -559,10 +561,13 @@ def _handle_missing_data_single_id(
if missing_dates > 0:
if config_missing.impute_missing:
log.info(f"{missing_dates} missing dates added.")
# FIX Issue#52
# Comment error raising to allow missing data for autoregression flow.
# else:
# raise ValueError(f"{missing_dates} missing dates found. Please preprocess data manually or set impute_missing to True.")
# FIX Issue#52
# Comment error raising to allow missing data for autoregression flow.
# else:
# raise ValueError(
# f"{missing_dates} missing dates found. Please preprocess data manually or set \
# impute_missing to True."
# )
# END FIX

if config_regressors is not None:
@@ -646,7 +651,8 @@ def _handle_missing_data_single_id(
log.info(f"{sum_na - remaining_na} NaN values in column {column} were auto-imputed.")
if remaining_na > 0:
log.warning(
f"More than {2 * config_missing.impute_linear + config_missing.impute_rolling} consecutive missing values encountered in column {column}. "
f"More than {2 * config_missing.impute_linear + config_missing.impute_rolling} consecutive \
missing values encountered in column {column}. "
f"{remaining_na} NA remain after auto-imputation. "
)
# FIX Issue#52
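The warning above bounds auto-imputation at roughly 2 * impute_linear + impute_rolling consecutive NaNs: linear interpolation handles short gaps, and a rolling mean handles what remains. A sketch of that two-stage fill with pandas (parameter names mirror config_missing; the exact call sequence is an assumption):

```python
import numpy as np
import pandas as pd

impute_linear, impute_rolling = 10, 10

s = pd.Series([1.0, 2.0] + [np.nan] * 5 + [9.0, 10.0])

# Stage 1: linear interpolation, capped at `impute_linear` filled values per gap.
s = s.interpolate(method="linear", limit=impute_linear, limit_area="inside")

# Stage 2: fill whatever is left with a rolling mean of the surrounding values.
s = s.fillna(s.rolling(impute_rolling, min_periods=1).mean())

print(int(s.isna().sum()))  # 0 -- a 5-value gap is well within the auto-impute budget
```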
8 changes: 4 additions & 4 deletions neuralprophet/data/split.py
@@ -227,13 +227,13 @@ def _make_future_dataframe(
if nan_at_end > 0:
if max_lags > 0 and (nan_at_end + 1) >= max_lags:
raise ValueError(
f"{nan_at_end + 1} missing values were detected at the end of df before df was extended into the future. "
"Please make sure there are no NaN values at the end of df."
f"{nan_at_end + 1} missing values were detected at the end of df before df was extended into "
"the future. Please make sure there are no NaN values at the end of df."
)
df["y"].iloc[-(nan_at_end + 1) :].ffill(inplace=True)
log.warning(
f"{nan_at_end + 1} missing values were forward-filled at the end of df before df was extended into the future. "
"Please make sure there are no NaN values at the end of df."
f"{nan_at_end + 1} missing values were forward-filled at the end of df before df was extended into the "
"future. Please make sure there are no NaN values at the end of df."
)

if len(df) > 0:
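The hunk rewraps the two messages around trailing-NaN handling: too many NaNs at the end of df raise, a tolerable number are forward-filled. A standalone re-creation of the fill step (simplified: no max_lags check, and the whole column is filled rather than a tail slice):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"y": [1.0, 2.0, 3.0, np.nan, np.nan]})

last_valid = df["y"].last_valid_index()
nan_at_end = (len(df) - 1) - last_valid if last_valid is not None else len(df)

if nan_at_end > 0:
    df["y"] = df["y"].ffill()  # trailing NaNs inherit the last observed value

print(df["y"].tolist())  # [1.0, 2.0, 3.0, 3.0, 3.0]
```

Filling the full column also sidesteps the chained-assignment pattern in the original (`df["y"].iloc[...].ffill(inplace=True)`), which newer pandas versions warn about under copy-on-write.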