diff --git a/torchsig/transforms/deep_learning_techniques/__init__.py b/torchsig/transforms/deep_learning_techniques/__init__.py
index 58b4958..8aeecd5 100644
--- a/torchsig/transforms/deep_learning_techniques/__init__.py
+++ b/torchsig/transforms/deep_learning_techniques/__init__.py
@@ -1,2 +1,2 @@
 from .dlt import *
-from .functional import *
+from .dlt_functional import *
diff --git a/torchsig/transforms/deep_learning_techniques/dlt.py b/torchsig/transforms/deep_learning_techniques/dlt.py
index ef1e742..3be8742 100644
--- a/torchsig/transforms/deep_learning_techniques/dlt.py
+++ b/torchsig/transforms/deep_learning_techniques/dlt.py
@@ -7,7 +7,7 @@
 from torchsig.transforms.wireless_channel import TargetSNR
 from torchsig.transforms.functional import to_distribution, uniform_continuous_distribution, uniform_discrete_distribution
 from torchsig.transforms.functional import NumericParameter, FloatParameter
-from torchsig.transforms.deep_learning_techniques import functional
+from torchsig.transforms.deep_learning_techniques import dlt_functional
 
 
 class DatasetBasebandMixUp(SignalTransform):
@@ -356,10 +356,10 @@ def __call__(self, data: Any) -> Any:
             new_data.signal_description = new_signal_description
 
             # Perform data augmentation
-            new_data.iq_data = functional.cut_out(data.iq_data, cut_start, cut_dur, cut_type)
+            new_data.iq_data = dlt_functional.cut_out(data.iq_data, cut_start, cut_dur, cut_type)
 
         else:
-            new_data = functional.cut_out(data, cut_start, cut_dur, cut_type)
+            new_data = dlt_functional.cut_out(data, cut_start, cut_dur, cut_type)
         return new_data
@@ -408,9 +408,9 @@ def __call__(self, data: Any) -> Any:
             )
 
             # Perform data augmentation
-            new_data.iq_data = functional.patch_shuffle(data.iq_data, patch_size, shuffle_ratio)
+            new_data.iq_data = dlt_functional.patch_shuffle(data.iq_data, patch_size, shuffle_ratio)
 
         else:
-            new_data = functional.patch_shuffle(data, patch_size, shuffle_ratio)
+            new_data = dlt_functional.patch_shuffle(data, patch_size, shuffle_ratio)
         return new_data
diff --git a/torchsig/transforms/deep_learning_techniques/functional.py b/torchsig/transforms/deep_learning_techniques/dlt_functional.py
similarity index 100%
rename from torchsig/transforms/deep_learning_techniques/functional.py
rename to torchsig/transforms/deep_learning_techniques/dlt_functional.py
diff --git a/torchsig/transforms/expert_feature/__init__.py b/torchsig/transforms/expert_feature/__init__.py
index ec96146..01ac526 100644
--- a/torchsig/transforms/expert_feature/__init__.py
+++ b/torchsig/transforms/expert_feature/__init__.py
@@ -1,2 +1,2 @@
 from .eft import *
-from .functional import *
+from .eft_functional import *
diff --git a/torchsig/transforms/expert_feature/eft.py b/torchsig/transforms/expert_feature/eft.py
index a71709d..aecbfdb 100644
--- a/torchsig/transforms/expert_feature/eft.py
+++ b/torchsig/transforms/expert_feature/eft.py
@@ -2,7 +2,7 @@
 from typing import Callable, Tuple, Any
 
 from torchsig.utils.types import SignalData
-from torchsig.transforms.expert_feature import functional as F
+from torchsig.transforms.expert_feature import eft_functional as F
 from torchsig.transforms.transforms import SignalTransform
diff --git a/torchsig/transforms/expert_feature/functional.py b/torchsig/transforms/expert_feature/eft_functional.py
similarity index 100%
rename from torchsig/transforms/expert_feature/functional.py
rename to torchsig/transforms/expert_feature/eft_functional.py
diff --git a/torchsig/transforms/signal_processing/__init__.py b/torchsig/transforms/signal_processing/__init__.py
index a6ce1d4..22642dc 100644
--- a/torchsig/transforms/signal_processing/__init__.py
+++ b/torchsig/transforms/signal_processing/__init__.py
@@ -1,2 +1,2 @@
 from .sp import *
-from .functional import *
+from .sp_functional import *
diff --git a/torchsig/transforms/signal_processing/sp.py b/torchsig/transforms/signal_processing/sp.py
index 4f04770..77dedc5 100644
--- a/torchsig/transforms/signal_processing/sp.py
+++ b/torchsig/transforms/signal_processing/sp.py
@@ -4,7 +4,7 @@
 from torchsig.utils.types import SignalData, SignalDescription
 
 from torchsig.transforms.transforms import SignalTransform
-from torchsig.transforms.signal_processing import functional as F
+from torchsig.transforms.signal_processing import sp_functional as F
 from torchsig.transforms.functional import NumericParameter, to_distribution
diff --git a/torchsig/transforms/signal_processing/functional.py b/torchsig/transforms/signal_processing/sp_functional.py
similarity index 100%
rename from torchsig/transforms/signal_processing/functional.py
rename to torchsig/transforms/signal_processing/sp_functional.py
diff --git a/torchsig/transforms/system_impairment/__init__.py b/torchsig/transforms/system_impairment/__init__.py
index 1b81242..fc6988b 100644
--- a/torchsig/transforms/system_impairment/__init__.py
+++ b/torchsig/transforms/system_impairment/__init__.py
@@ -1,2 +1,2 @@
 from .si import *
-from .functional import *
+from .si_functional import *
diff --git a/torchsig/transforms/system_impairment/si.py b/torchsig/transforms/system_impairment/si.py
index 8f0e29a..7dbf8fa 100644
--- a/torchsig/transforms/system_impairment/si.py
+++ b/torchsig/transforms/system_impairment/si.py
@@ -5,7 +5,7 @@
 from torchsig.utils.types import SignalData, SignalDescription
 
 from torchsig.transforms.transforms import SignalTransform
-from torchsig.transforms.system_impairment import functional
+from torchsig.transforms.system_impairment import si_functional
 from torchsig.transforms.functional import NumericParameter, IntParameter, FloatParameter
 from torchsig.transforms.functional import to_distribution, uniform_continuous_distribution, uniform_discrete_distribution
@@ -67,13 +67,13 @@ def __call__(self, data: Any) -> Any:
             )
 
             # Apply data transformation
-            new_data.iq_data = functional.fractional_shift(
+            new_data.iq_data = si_functional.fractional_shift(
                 data.iq_data,
                 self.taps,
                 self.interp_rate,
                 -decimal_part # this needed to be negated to be consistent with the previous implementation
             )
-            new_data.iq_data = functional.time_shift(new_data.iq_data, int(integer_part))
+            new_data.iq_data = si_functional.time_shift(new_data.iq_data, int(integer_part))
 
             # Update SignalDescription
             new_signal_description = []
@@ -91,13 +91,13 @@ def __call__(self, data: Any) -> Any:
             new_data.signal_description = new_signal_description
 
         else:
-            new_data = functional.fractional_shift(
+            new_data = si_functional.fractional_shift(
                 data,
                 self.taps,
                 self.interp_rate,
                 -decimal_part # this needed to be negated to be consistent with the previous implementation
             )
-            new_data = functional.time_shift(new_data, int(integer_part))
+            new_data = si_functional.time_shift(new_data, int(integer_part))
 
         return new_data
@@ -167,7 +167,7 @@ def __call__(self, data: Any) -> Any:
             )
 
             # Perform data augmentation
-            new_data.iq_data = functional.time_crop(iq_data, start, self.length)
+            new_data.iq_data = si_functional.time_crop(iq_data, start, self.length)
 
             # Update SignalDescription
             new_signal_description = []
@@ -190,7 +190,7 @@ def __call__(self, data: Any) -> Any:
             new_data.signal_description = new_signal_description
 
         else:
-            new_data = functional.time_crop(data, start, self.length)
+            new_data = si_functional.time_crop(data, start, self.length)
 
         return new_data
@@ -228,10 +228,10 @@ def __call__(self, data: Any) -> Any:
             )
 
             # Perform data augmentation
-            new_data.iq_data = functional.time_reversal(data.iq_data)
+            new_data.iq_data = si_functional.time_reversal(data.iq_data)
             if undo_spec_inversion:
                 # If spectral inversion not desired, reverse effect
-                new_data.iq_data = functional.spectral_inversion(new_data.iq_data)
+                new_data.iq_data = si_functional.spectral_inversion(new_data.iq_data)
 
             # Update SignalDescription
             new_signal_description = []
@@ -258,10 +258,10 @@ def __call__(self, data: Any) -> Any:
             new_data.signal_description = new_signal_description
 
         else:
-            new_data = functional.time_reversal(data)
+            new_data = si_functional.time_reversal(data)
             if undo_spec_inversion:
                 # If spectral inversion not desired, reverse effect
-                new_data = functional.spectral_inversion(new_data)
+                new_data = si_functional.spectral_inversion(new_data)
 
         return new_data
@@ -284,10 +284,10 @@ def __call__(self, data: Any) -> Any:
             )
 
             # Perform data augmentation
-            new_data.iq_data = functional.amplitude_reversal(data.iq_data)
+            new_data.iq_data = si_functional.amplitude_reversal(data.iq_data)
 
         else:
-            new_data = functional.amplitude_reversal(data)
+            new_data = si_functional.amplitude_reversal(data)
 
         return new_data
@@ -373,13 +373,13 @@ def __call__(self, data: Any) -> Any:
             # Apply data augmentation
             if avoid_aliasing:
                 # If any potential aliasing detected, perform shifting at higher sample rate
-                new_data.iq_data = functional.freq_shift_avoid_aliasing(data.iq_data, freq_shift)
+                new_data.iq_data = si_functional.freq_shift_avoid_aliasing(data.iq_data, freq_shift)
             else:
                 # Otherwise, use faster freq shifter
-                new_data.iq_data = functional.freq_shift(data.iq_data, freq_shift)
+                new_data.iq_data = si_functional.freq_shift(data.iq_data, freq_shift)
 
         else:
-            new_data = functional.freq_shift(data, freq_shift)
+            new_data = si_functional.freq_shift(data, freq_shift)
 
         return new_data
@@ -600,7 +600,7 @@ def __call__(self, data: Any) -> Any:
         ref_level_db = np.random.uniform(-.5 + self.ref_level_db, .5 + self.ref_level_db, 1)
 
-        iq_data = functional.agc(
+        iq_data = si_functional.agc(
            np.ascontiguousarray(iq_data, dtype=np.complex64),
            np.float64(self.initial_gain_db),
            np.float64(alpha_smooth),
@@ -677,14 +677,14 @@ def __call__(self, data: Any) -> Any:
         dc_offset = self.dc_offset()
 
         if isinstance(data, SignalData):
-            data.iq_data = functional.iq_imbalance(
+            data.iq_data = si_functional.iq_imbalance(
                 data.iq_data,
                 amp_imbalance,
                 phase_imbalance,
                 dc_offset
             )
         else:
-            data = functional.iq_imbalance(
+            data = si_functional.iq_imbalance(
                 data,
                 amp_imbalance,
                 phase_imbalance,
@@ -742,9 +742,9 @@ def __call__(self, data: Any) -> Any:
         upper_freq = self.upper_freq() if np.random.rand() < self.upper_cut_apply else 1.0
         order = self.order()
         if isinstance(data, SignalData):
-            data.iq_data = functional.roll_off(data.iq_data, low_freq, upper_freq, int(order))
+            data.iq_data = si_functional.roll_off(data.iq_data, low_freq, upper_freq, int(order))
         else:
-            data = functional.roll_off(data, low_freq, upper_freq, int(order))
+            data = si_functional.roll_off(data, low_freq, upper_freq, int(order))
         return data
@@ -767,10 +767,10 @@ def __call__(self, data: Any) -> Any:
             )
 
             # Apply data augmentation
-            new_data.iq_data = functional.add_slope(data.iq_data)
+            new_data.iq_data = si_functional.add_slope(data.iq_data)
 
         else:
-            new_data = functional.add_slope(data)
+            new_data = si_functional.add_slope(data)
 
         return new_data
@@ -792,7 +792,7 @@ def __call__(self, data: Any) -> Any:
             )
 
             # Perform data augmentation
-            new_data.iq_data = functional.spectral_inversion(data.iq_data)
+            new_data.iq_data = si_functional.spectral_inversion(data.iq_data)
 
             # Update SignalDescription
             new_signal_description = []
@@ -812,7 +812,7 @@ def __call__(self, data: Any) -> Any:
             new_data.signal_description = new_signal_description
 
         else:
-            new_data = functional.spectral_inversion(data)
+            new_data = si_functional.spectral_inversion(data)
 
         return new_data
@@ -851,10 +851,10 @@ def __call__(self, data: Any) -> Any:
             new_data.signal_description = new_signal_description
 
             # Perform data augmentation
-            new_data.iq_data = functional.channel_swap(data.iq_data)
+            new_data.iq_data = si_functional.channel_swap(data.iq_data)
 
         else:
-            new_data = functional.channel_swap(data)
+            new_data = si_functional.channel_swap(data)
 
         return new_data
@@ -901,10 +901,10 @@ def __call__(self, data: Any) -> Any:
             )
 
             # Perform data augmentation
-            new_data.iq_data = functional.mag_rescale(data.iq_data, start, scale)
+            new_data.iq_data = si_functional.mag_rescale(data.iq_data, start, scale)
 
         else:
-            new_data = functional.mag_rescale(data, start, scale)
+            new_data = si_functional.mag_rescale(data, start, scale)
 
         return new_data
@@ -970,14 +970,14 @@ def __call__(self, data: Any) -> Any:
             drop_sizes = self.size(drop_instances).astype(int)
             drop_starts = np.random.uniform(1, data.iq_data.shape[0]-max(drop_sizes)-1, drop_instances).astype(int)
 
-            new_data.iq_data = functional.drop_samples(data.iq_data, drop_starts, drop_sizes, fill)
+            new_data.iq_data = si_functional.drop_samples(data.iq_data, drop_starts, drop_sizes, fill)
 
         else:
             drop_instances = int(data.shape[0] * drop_rate)
             drop_sizes = self.size(drop_instances).astype(int)
             drop_starts = np.random.uniform(0, data.shape[0]-max(drop_sizes), drop_instances).astype(int)
 
-            new_data = functional.drop_samples(data, drop_starts, drop_sizes, fill)
+            new_data = si_functional.drop_samples(data, drop_starts, drop_sizes, fill)
 
         return new_data
@@ -1022,10 +1022,10 @@ def __call__(self, data: Any) -> Any:
             )
 
             # Perform data augmentation
-            new_data.iq_data = functional.quantize(data.iq_data, num_levels, round_type)
+            new_data.iq_data = si_functional.quantize(data.iq_data, num_levels, round_type)
 
         else:
-            new_data = functional.quantize(data, num_levels, round_type)
+            new_data = si_functional.quantize(data, num_levels, round_type)
 
         return new_data
@@ -1063,10 +1063,10 @@ def __call__(self, data: Any) -> Any:
             )
 
            # Apply data augmentation
-            new_data.iq_data = functional.clip(data.iq_data, clip_percentage)
+            new_data.iq_data = si_functional.clip(data.iq_data, clip_percentage)
 
         else:
-            new_data = functional.clip(data, clip_percentage)
+            new_data = si_functional.clip(data, clip_percentage)
 
         return new_data
@@ -1117,8 +1117,8 @@ def __call__(self, data: Any) -> Any:
             )
 
            # Apply data augmentation
-            new_data.iq_data = functional.random_convolve(data.iq_data, num_taps, alpha)
+            new_data.iq_data = si_functional.random_convolve(data.iq_data, num_taps, alpha)
 
         else:
-            new_data = functional.random_convolve(data, num_taps, alpha)
+            new_data = si_functional.random_convolve(data, num_taps, alpha)
 
         return new_data
diff --git a/torchsig/transforms/system_impairment/functional.py b/torchsig/transforms/system_impairment/si_functional.py
similarity index 100%
rename from torchsig/transforms/system_impairment/functional.py
rename to torchsig/transforms/system_impairment/si_functional.py
diff --git a/torchsig/transforms/wireless_channel/__init__.py b/torchsig/transforms/wireless_channel/__init__.py
index 4da9f8b..fcfb11f 100644
--- a/torchsig/transforms/wireless_channel/__init__.py
+++ b/torchsig/transforms/wireless_channel/__init__.py
@@ -1,2 +1,2 @@
 from .wce import *
-from .functional import *
+from .wce_functional import *
diff --git a/torchsig/transforms/wireless_channel/wce.py b/torchsig/transforms/wireless_channel/wce.py
index 1fe902c..851e931 100644
--- a/torchsig/transforms/wireless_channel/wce.py
+++ b/torchsig/transforms/wireless_channel/wce.py
@@ -4,7 +4,7 @@
 from torchsig.utils.types import SignalData, SignalDescription
 
 from torchsig.transforms.transforms import SignalTransform
-from torchsig.transforms.wireless_channel import functional as F
+from torchsig.transforms.wireless_channel import wce_functional as F
 from torchsig.transforms.functional import NumericParameter, FloatParameter, IntParameter
 from torchsig.transforms.functional import to_distribution, uniform_continuous_distribution, uniform_discrete_distribution
diff --git a/torchsig/transforms/wireless_channel/functional.py b/torchsig/transforms/wireless_channel/wce_functional.py
similarity index 100%
rename from torchsig/transforms/wireless_channel/functional.py
rename to torchsig/transforms/wireless_channel/wce_functional.py
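The patch is a mechanical rename of each domain's `functional.py` to a prefixed module (`dlt_functional.py`, `eft_functional.py`, `sp_functional.py`, `si_functional.py`, `wce_functional.py`), with every import updated to match; the wildcard re-exports in each `__init__.py` are kept, so package-level imports should be unaffected. A minimal sketch of how downstream code is touched, assuming the renamed modules keep their existing public functions with the argument lists shown in the diff (the concrete values passed below are hypothetical):

```python
import numpy as np

# Old direct import (no longer resolves after the rename):
#   from torchsig.transforms.system_impairment import functional as si_f
# New direct imports use the domain-prefixed module names:
from torchsig.transforms.system_impairment import si_functional as si_f
from torchsig.transforms.deep_learning_techniques import dlt_functional as dlt_f

# Toy complex baseband tone used only to exercise the calls.
iq = np.exp(2j * np.pi * 0.01 * np.arange(4096)).astype(np.complex64)

# Same function names and arities as in the diff; the specific shift amount,
# cut start/duration fractions, and fill type are assumed example values.
shifted = si_f.time_shift(iq, 16)
cut = dlt_f.cut_out(iq, 0.25, 0.1, "zeros")
```

Code that relied only on the wildcard exports (e.g. importing transforms from `torchsig.transforms` directly) should keep working, since each `__init__.py` still re-exports the renamed module's contents.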