From c94b353f14150b9f9be83fd97bb0c2727c5c7845 Mon Sep 17 00:00:00 2001
From: Enno Hermann
Date: Sat, 9 Nov 2024 17:58:38 +0100
Subject: [PATCH 1/7] ci(coverage): only use parallel mode in ci

---
 .github/workflows/tests.yml | 2 +-
 pyproject.toml              | 8 ++++++--
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 5ab4a4e..c3199f6 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -25,7 +25,7 @@ jobs:
       - name: Set up Python ${{ matrix.python-version }}
         run: uv python install ${{ matrix.python-version }}
       - name: Unit tests
-        run: uv run --resolution=${{ matrix.uv-resolution }} --all-extras coverage run -m pytest trainer tests
+        run: uv run --resolution=${{ matrix.uv-resolution }} --all-extras coverage run --parallel
       - name: Upload coverage data
         uses: actions/upload-artifact@v4
         with:
diff --git a/pyproject.toml b/pyproject.toml
index 7376384..50c9751 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -98,6 +98,10 @@ lint.ignore = [
     "F403", # init files may have star imports for now
 ]
 
+[tool.coverage.report]
+show_missing = true
+skip_empty = true
+
 [tool.coverage.run]
-parallel = true
-source = ["trainer"]
+source = ["trainer", "tests"]
+command_line = "-m pytest"

From 77a9fd7478cb697e3d7c500b3d55aa2a3d497c03 Mon Sep 17 00:00:00 2001
From: Enno Hermann
Date: Sun, 10 Nov 2024 10:46:40 +0100
Subject: [PATCH 2/7] fix: override output_path if provided in Trainer init

---
 trainer/trainer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/trainer/trainer.py b/trainer/trainer.py
index 2e9d639..332b36c 100644
--- a/trainer/trainer.py
+++ b/trainer/trainer.py
@@ -192,7 +192,7 @@ def __init__(  # pylint: disable=dangerous-default-value
             # override the output path if it is provided
             output_path = config.output_path if output_path is None else output_path
             # create a new output folder name
-            output_path = get_experiment_folder_path(config.output_path, config.run_name)
+            output_path = get_experiment_folder_path(output_path, config.run_name)
             os.makedirs(output_path, exist_ok=True)
 
             # copy training assets to the output folder

From f91fc393068def239b1c47a82c2c17a0f8681b8b Mon Sep 17 00:00:00 2001
From: Enno Hermann
Date: Sun, 10 Nov 2024 14:51:22 +0100
Subject: [PATCH 3/7] fix: use temp folder for test outputs

---
 examples/train_mnist.py               |  1 -
 examples/train_simple_gan.py          |  2 +-
 tests/test_continue_train.py          | 27 ++++++++-----------
 tests/test_generic_utils.py           |  7 ++---
 tests/test_lr_schedulers.py           |  5 ++--
 tests/test_train_batch_size_finder.py |  8 ++----
 tests/test_train_gan.py               | 20 +++++++-------
 tests/test_train_mnist.py             | 39 ++++++++++++++++++++++-----
 tests/utils/train_mnist.py            |  1 -
 trainer/trainer.py                    | 20 +++++++-------
 10 files changed, 72 insertions(+), 58 deletions(-)

diff --git a/examples/train_mnist.py b/examples/train_mnist.py
index 01ef01d..becc841 100644
--- a/examples/train_mnist.py
+++ b/examples/train_mnist.py
@@ -87,7 +87,6 @@ def main():
     trainer = Trainer(
         train_args,
         config,
-        config.output_path,
         model=model,
         train_samples=model.get_data_loader(config, None, False, None, None, None),
         eval_samples=model.get_data_loader(config, None, True, None, None, None),
diff --git a/examples/train_simple_gan.py b/examples/train_simple_gan.py
index 908c53c..85e2b2a 100644
--- a/examples/train_simple_gan.py
+++ b/examples/train_simple_gan.py
@@ -166,6 +166,6 @@ def get_data_loader(self, config, assets, is_eval, samples, verbose, num_gpus, r
     config.grad_clip = None
 
     model = GANModel()
-    trainer = Trainer(TrainerArgs(), config, model=model, output_path=os.getcwd(), gpu=0 if is_cuda else None)
+    trainer = Trainer(TrainerArgs(), config, model=model, gpu=0 if is_cuda else None)
     trainer.config.epochs = 10
     trainer.fit()
diff --git a/tests/test_continue_train.py b/tests/test_continue_train.py
index cc6632b..e953b7a 100644
--- a/tests/test_continue_train.py
+++ b/tests/test_continue_train.py
@@ -1,32 +1,27 @@
-import glob
-import os
-import shutil
-
 from tests import run_cli
 
 
-def test_continue_train():
-    output_path = "output/"
-
-    command_train = "python tests/utils/train_mnist.py"
+def test_continue_train(tmp_path):
+    command_train = f"python tests/utils/train_mnist.py --coqpit.output_path {tmp_path}"
     run_cli(command_train)
 
-    continue_path = max(glob.glob(os.path.join(output_path, "*/")), key=os.path.getmtime)
-    number_of_checkpoints = len(glob.glob(os.path.join(continue_path, "*.pth")))
+    continue_path = max(tmp_path.iterdir(), key=lambda p: p.stat().st_mtime)
+    number_of_checkpoints = len(list(continue_path.glob("*.pth")))
 
     # Continue training from the best model
     command_continue = f"python tests/utils/train_mnist.py --continue_path {continue_path} --coqpit.run_eval_steps=1"
     run_cli(command_continue)
 
-    assert number_of_checkpoints < len(glob.glob(os.path.join(continue_path, "*.pth")))
+    assert number_of_checkpoints < len(list(continue_path.glob("*.pth")))
 
     # Continue training from the last checkpoint
-    for best in glob.glob(os.path.join(continue_path, "best_model*")):
-        os.remove(best)
+    for best in continue_path.glob("best_model*"):
+        best.unlink()
     run_cli(command_continue)
 
     # Continue training from a specific checkpoint
-    restore_path = os.path.join(continue_path, "checkpoint_5.pth")
-    command_continue = f"python tests/utils/train_mnist.py --restore_path {restore_path}"
+    restore_path = continue_path / "checkpoint_5.pth"
+    command_continue = (
+        f"python tests/utils/train_mnist.py --restore_path {restore_path} --coqpit.output_path {tmp_path}"
+    )
     run_cli(command_continue)
-    shutil.rmtree(continue_path)
diff --git a/tests/test_generic_utils.py b/tests/test_generic_utils.py
index d36e99c..ef2a3ee 100644
--- a/tests/test_generic_utils.py
+++ b/tests/test_generic_utils.py
@@ -1,11 +1,8 @@
-from pathlib import Path
-
 from trainer.generic_utils import remove_experiment_folder
 
 
-def test_remove_experiment_folder():
-    output_dir = Path("output")
-    run_dir = output_dir / "run"
+def test_remove_experiment_folder(tmp_path):
+    run_dir = tmp_path / "run"
     run_dir.mkdir(exist_ok=True, parents=True)
 
     remove_experiment_folder(run_dir)
diff --git a/tests/test_lr_schedulers.py b/tests/test_lr_schedulers.py
index 6ebfaa2..24d8e51 100644
--- a/tests/test_lr_schedulers.py
+++ b/tests/test_lr_schedulers.py
@@ -1,4 +1,3 @@
-import os
 import time
 
 import torch
@@ -10,7 +9,7 @@
 is_cuda = torch.cuda.is_available()
 
 
-def test_train_mnist():
+def test_train_mnist(tmp_path):
     model = MnistModel()
     # Test StepwiseGradualLR
     config = MnistModelConfig(
@@ -23,7 +22,7 @@
         },
         scheduler_after_epoch=False,
     )
-    trainer = Trainer(TrainerArgs(), config, model=model, output_path=os.getcwd(), gpu=0 if is_cuda else None)
+    trainer = Trainer(TrainerArgs(), config, output_path=tmp_path, model=model, gpu=0 if is_cuda else None)
     trainer.train_loader = trainer.get_train_dataloader(
         trainer.training_assets,
         trainer.train_samples,
diff --git a/tests/test_train_batch_size_finder.py b/tests/test_train_batch_size_finder.py
index 209c061..ef1f7c2 100644
--- a/tests/test_train_batch_size_finder.py
+++ b/tests/test_train_batch_size_finder.py
@@ -1,5 +1,3 @@
-import os
-
 import torch
 
 from tests.utils.mnist import MnistModel, MnistModelConfig
@@ -8,11 +6,9 @@
 is_cuda = torch.cuda.is_available()
 
 
-def test_train_largest_batch_mnist():
+def test_train_largest_batch_mnist(tmp_path):
     model = MnistModel()
-    trainer = Trainer(
-        TrainerArgs(), MnistModelConfig(), model=model, output_path=os.getcwd(), gpu=0 if is_cuda else None
-    )
+    trainer = Trainer(TrainerArgs(), MnistModelConfig(), output_path=tmp_path, model=model, gpu=0 if is_cuda else None)
     trainer.fit_with_largest_batch_size(starting_batch_size=2048)
 
     loss1 = trainer.keep_avg_train["avg_loss"]
diff --git a/tests/test_train_gan.py b/tests/test_train_gan.py
index d02976d..a4b4c93 100644
--- a/tests/test_train_gan.py
+++ b/tests/test_train_gan.py
@@ -63,7 +63,7 @@ def forward(self, img):
         return validity
 
 
-def test_overfit_mnist_simple_gan():
+def test_overfit_mnist_simple_gan(tmp_path):
     @dataclass
     class GANModelConfig(TrainerConfig):
         epochs: int = 1
@@ -137,7 +137,7 @@ def get_data_loader(self, config, assets, is_eval, samples, verbose, num_gpus, r
     config.grad_clip = None
 
     model = GANModel()
-    trainer = Trainer(TrainerArgs(), config, model=model, output_path=os.getcwd(), gpu=0 if is_cuda else None)
+    trainer = Trainer(TrainerArgs(), config, output_path=tmp_path, model=model, gpu=0 if is_cuda else None)
     trainer.config.epochs = 1
     trainer.fit()
 
@@ -155,7 +155,7 @@ def get_data_loader(self, config, assets, is_eval, samples, verbose, num_gpus, r
     assert loss_g1 > loss_g2, f"Generator loss should decrease. {loss_g1} > {loss_g2}"
 
 
-def test_overfit_accelerate_mnist_simple_gan():
+def test_overfit_accelerate_mnist_simple_gan(tmp_path):
     @dataclass
     class GANModelConfig(TrainerConfig):
         epochs: int = 1
@@ -231,7 +231,7 @@ def get_data_loader(self, config, assets, is_eval, samples, verbose, num_gpus, r
 
     model = GANModel()
     trainer = Trainer(
-        TrainerArgs(use_accelerate=True), config, model=model, output_path=os.getcwd(), gpu=0 if is_cuda else None
+        TrainerArgs(use_accelerate=True), config, output_path=tmp_path, model=model, gpu=0 if is_cuda else None
     )
 
     trainer.eval_epoch()
@@ -249,7 +249,7 @@ def get_data_loader(self, config, assets, is_eval, samples, verbose, num_gpus, r
     assert loss_g1 > loss_g2, f"Generator loss should decrease. {loss_g1} > {loss_g2}"
 
 
-def test_overfit_manual_optimize_mnist_simple_gan():
+def test_overfit_manual_optimize_mnist_simple_gan(tmp_path):
     @dataclass
     class GANModelConfig(TrainerConfig):
         epochs: int = 1
@@ -342,7 +342,7 @@ def get_data_loader(self, config, assets, is_eval, samples, verbose, num_gpus, r
     config.grad_clip = None
 
     model = GANModel()
-    trainer = Trainer(TrainerArgs(), config, model=model, output_path=os.getcwd(), gpu=0 if is_cuda else None)
+    trainer = Trainer(TrainerArgs(), config, output_path=tmp_path, model=model, gpu=0 if is_cuda else None)
     trainer.config.epochs = 1
     trainer.fit()
 
@@ -360,7 +360,7 @@ def get_data_loader(self, config, assets, is_eval, samples, verbose, num_gpus, r
     assert loss_g1 > loss_g2, f"Generator loss should decrease. {loss_g1} > {loss_g2}"
 
 
-def test_overfit_manual_optimize_grad_accum_mnist_simple_gan():
+def test_overfit_manual_optimize_grad_accum_mnist_simple_gan(tmp_path):
     @dataclass
     class GANModelConfig(TrainerConfig):
         epochs: int = 1
@@ -456,7 +456,7 @@ def get_data_loader(self, config, assets, is_eval, samples, verbose, num_gpus, r
     config.grad_clip = None
 
     model = GANModel()
-    trainer = Trainer(TrainerArgs(), config, model=model, output_path=os.getcwd(), gpu=0 if is_cuda else None)
+    trainer = Trainer(TrainerArgs(), config, output_path=tmp_path, model=model, gpu=0 if is_cuda else None)
     trainer.config.epochs = 1
     trainer.fit()
 
@@ -474,7 +474,7 @@ def get_data_loader(self, config, assets, is_eval, samples, verbose, num_gpus, r
     assert loss_g1 > loss_g2, f"Generator loss should decrease. {loss_g1} > {loss_g2}"
 
 
-def test_overfit_manual_accelerate_optimize_grad_accum_mnist_simple_gan():
+def test_overfit_manual_accelerate_optimize_grad_accum_mnist_simple_gan(tmp_path):
     @dataclass
     class GANModelConfig(TrainerConfig):
         epochs: int = 1
@@ -573,7 +573,7 @@ def get_data_loader(self, config, assets, is_eval, samples, verbose, num_gpus, r
 
     model = GANModel()
     trainer = Trainer(
-        TrainerArgs(use_accelerate=True), config, model=model, output_path=os.getcwd(), gpu=0 if is_cuda else None
+        TrainerArgs(use_accelerate=True), config, output_path=tmp_path, model=model, gpu=0 if is_cuda else None
     )
 
     trainer.config.epochs = 1
diff --git a/tests/test_train_mnist.py b/tests/test_train_mnist.py
index 6deea75..5383b39 100644
--- a/tests/test_train_mnist.py
+++ b/tests/test_train_mnist.py
@@ -1,5 +1,3 @@
-import os
-
 import torch
 
 from tests.utils.mnist import MnistModel, MnistModelConfig
@@ -8,11 +6,11 @@
 is_cuda = torch.cuda.is_available()
 
 
-def test_train_mnist():
+def test_train_mnist(tmp_path):
     model = MnistModel()
-    trainer = Trainer(
-        TrainerArgs(), MnistModelConfig(), model=model, output_path=os.getcwd(), gpu=0 if is_cuda else None
-    )
+
+    # Parsing command line args
+    trainer = Trainer(TrainerArgs(), MnistModelConfig(), output_path=tmp_path, model=model, gpu=0 if is_cuda else None)
     trainer.fit()
     loss1 = trainer.keep_avg_train["avg_loss"]
 
@@ -21,3 +19,32 @@
     loss2 = trainer.keep_avg_train["avg_loss"]
 
     assert loss1 > loss2
+
+    # Without parsing command line args
+    args = TrainerArgs()
+
+    trainer2 = Trainer(
+        args,
+        MnistModelConfig(),
+        output_path=tmp_path,
+        model=model,
+        gpu=0 if is_cuda else None,
+        parse_command_line_args=False,
+    )
+    trainer2.fit()
+    loss3 = trainer2.keep_avg_train["avg_loss"]
+
+    args.continue_path = str(max(tmp_path.iterdir(), key=lambda p: p.stat().st_mtime))
+
+    trainer3 = Trainer(
+        args,
+        MnistModelConfig(),
+        output_path=tmp_path,
+        model=model,
+        gpu=0 if is_cuda else None,
+        parse_command_line_args=False,
+    )
+    trainer3.fit()
+    loss4 = trainer3.keep_avg_train["avg_loss"]
+
+    assert loss3 > loss4
diff --git a/tests/utils/train_mnist.py b/tests/utils/train_mnist.py
index 13fda81..2fc5894 100644
--- a/tests/utils/train_mnist.py
+++ b/tests/utils/train_mnist.py
@@ -16,7 +16,6 @@ def main():
     trainer = Trainer(
         train_args,
         config,
-        config.output_path,
         model=model,
         train_samples=model.get_data_loader(config, None, False, None, None, None),
         eval_samples=model.get_data_loader(config, None, True, None, None, None),
diff --git a/trainer/trainer.py b/trainer/trainer.py
index 332b36c..0fcb1c1 100644
--- a/trainer/trainer.py
+++ b/trainer/trainer.py
@@ -19,7 +19,7 @@
 from torch.utils.data import DataLoader
 
 from trainer.callbacks import TrainerCallback
-from trainer.config import TrainerArgs
+from trainer.config import TrainerArgs, TrainerConfig
 from trainer.generic_utils import (
     KeepAverage,
     count_parameters,
@@ -65,11 +65,12 @@ class Trainer:
     def __init__(  # pylint: disable=dangerous-default-value
         self,
         args: TrainerArgs,
-        config: Coqpit,
-        output_path: str,
-        c_logger: ConsoleLogger = None,
-        dashboard_logger: BaseDashboardLogger = None,
-        model: nn.Module = None,
+        config: TrainerConfig,
+        output_path: Optional[Union[str, os.PathLike[Any]]] = None,
+        *,
+        c_logger: Optional[ConsoleLogger] = None,
+        dashboard_logger: Optional[BaseDashboardLogger] = None,
+        model: Optional[nn.Module] = None,
         get_model: Optional[Callable] = None,
         get_data_samples: Optional[Callable] = None,
         train_samples: Optional[list] = None,
@@ -99,7 +100,8 @@ def __init__(  # pylint: disable=dangerous-default-value
             config (Coqpit): Model config object. It includes all the values necessary for initializing,
                 training, evaluating and testing the model.
 
-            output_path (str): Path to the output training folder. All the files are saved under thi path.
+            output_path (str or Path, optional): Path to the output training folder. All
+                the files are saved under this path. Uses value from config if None.
 
             c_logger (ConsoleLogger, optional): Console logger for printing training status. If not provided, the default
                 console logger is used. Defaults to None.
@@ -158,7 +160,7 @@ def __init__(  # pylint: disable=dangerous-default-value
             >>> args = TrainerArgs(...)
             >>> config = ModelConfig(...)
             >>> model = Model(config)
-            >>> trainer = Trainer(args, config, output_path, model=model)
+            >>> trainer = Trainer(args, config, model=model)
             >>> trainer.fit()
 
         TODO:
@@ -190,7 +192,7 @@ def __init__(  # pylint: disable=dangerous-default-value
             output_path = args.continue_path
         else:
             # override the output path if it is provided
-            output_path = config.output_path if output_path is None else output_path
+            output_path = config.output_path if output_path is None else str(output_path)
             # create a new output folder name
             output_path = get_experiment_folder_path(output_path, config.run_name)
             os.makedirs(output_path, exist_ok=True)

From dd3b54371c0cf1cbcd0234eb426ade64d318939e Mon Sep 17 00:00:00 2001
From: Enno Hermann
Date: Sat, 9 Nov 2024 09:01:42 +0100
Subject: [PATCH 4/7] refactor: handle deprecation of torch.cuda.amp.GradScaler

torch.cuda.amp.GradScaler(args...) and torch.cpu.amp.GradScaler(args...)
will be deprecated. Please use torch.GradScaler("cuda", args...) or
torch.GradScaler("cpu", args...) instead.
https://pytorch.org/docs/stable/amp.html

torch.GradScaler is only available from torch>=2.3, so we need to use a
conditional, and in type hints it needs to be surrounded by quotes.
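
Illustration (not part of this patch's diff): the version gate can be
reproduced standalone; this sketch assumes only that torch and packaging
are importable:

    import functools

    import torch
    from packaging.version import Version

    # torch.GradScaler unifies the CPU/CUDA scaler classes from torch 2.3 on.
    if Version(torch.__version__) >= Version("2.3"):
        GradScaler = functools.partial(torch.GradScaler, device="cuda")
    else:
        # Older torch releases only offer the CUDA-specific class.
        GradScaler = torch.cuda.amp.GradScaler

    scaler = GradScaler()  # same call site works on either torch version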
---
 trainer/generic_utils.py |  5 +++++
 trainer/trainer.py       | 22 ++++++++++++++--------
 2 files changed, 19 insertions(+), 8 deletions(-)

diff --git a/trainer/generic_utils.py b/trainer/generic_utils.py
index d8b24bc..212c5d7 100644
--- a/trainer/generic_utils.py
+++ b/trainer/generic_utils.py
@@ -10,6 +10,11 @@
 from trainer.logger import logger
 
 
+def is_pytorch_at_least_2_3() -> bool:
+    """Check if the installed Pytorch version is 2.3 or higher."""
+    return Version(torch.__version__) >= Version("2.3")
+
+
 def is_pytorch_at_least_2_4() -> bool:
     """Check if the installed Pytorch version is 2.4 or higher."""
     return Version(torch.__version__) >= Version("2.4")
diff --git a/trainer/trainer.py b/trainer/trainer.py
index 0fcb1c1..5d374a4 100644
--- a/trainer/trainer.py
+++ b/trainer/trainer.py
@@ -1,3 +1,4 @@
+import functools
 import gc
 import importlib
 import logging
@@ -25,6 +26,7 @@
     count_parameters,
     get_experiment_folder_path,
     get_git_branch,
+    is_pytorch_at_least_2_3,
     is_pytorch_at_least_2_4,
     isimplemented,
     remove_experiment_folder,
@@ -60,6 +62,11 @@
 if is_apex_available():
     from apex import amp  # pylint: disable=import-error
 
+if is_pytorch_at_least_2_3():
+    GradScaler = functools.partial(torch.GradScaler, device="cuda")
+else:
+    GradScaler = torch.cuda.amp.GradScaler
+
 
 class Trainer:
     def __init__(  # pylint: disable=dangerous-default-value
@@ -331,7 +338,7 @@ def __init__(  # pylint: disable=dangerous-default-value
             if self.use_apex:
                 self.scaler = None
                 self.model, self.optimizer = amp.initialize(self.model, self.optimizer, opt_level="O1")
-            self.scaler = torch.cuda.amp.GradScaler()
+            self.scaler = GradScaler()
         else:
             self.scaler = None
 
@@ -340,7 +347,6 @@ def __init__(  # pylint: disable=dangerous-default-value
             (self.model, self.optimizer, self.scaler, self.restore_step, self.restore_epoch) = self.restore_model(
                 self.config, args.restore_path, self.model, self.optimizer, self.scaler
             )
-            self.scaler = torch.cuda.amp.GradScaler()
 
         # setup scheduler
         self.scheduler = self.get_scheduler(self.model, self.config, self.optimizer)
@@ -592,8 +598,8 @@ def restore_model(
         restore_path: Union[str, os.PathLike[Any]],
         model: nn.Module,
         optimizer: torch.optim.Optimizer,
-        scaler: torch.cuda.amp.GradScaler = None,
-    ) -> tuple[nn.Module, torch.optim.Optimizer, torch.cuda.amp.GradScaler, int]:
+        scaler: Optional["torch.GradScaler"] = None,
+    ) -> tuple[nn.Module, torch.optim.Optimizer, "torch.GradScaler", int]:
         """Restore training from an old run. It restores model, optimizer, AMP scaler and training stats.
 
         Args:
@@ -601,10 +607,10 @@ def restore_model(
             restore_path (str): Path to the restored training run.
             model (nn.Module): Model to restored.
             optimizer (torch.optim.Optimizer): Optimizer to restore.
-            scaler (torch.cuda.amp.GradScaler, optional): AMP scaler to restore. Defaults to None.
+            scaler (torch.GradScaler, optional): AMP scaler to restore. Defaults to None.
 
         Returns:
-            Tuple[nn.Module, torch.optim.Optimizer, torch.cuda.amp.GradScaler, int]: [description]
+            Tuple[nn.Module, torch.optim.Optimizer, "torch.GradScaler", int]: [description]
         """
 
         def _restore_list_objs(states, obj):
@@ -950,7 +956,7 @@ def _set_grad_clip_per_optimizer(config: Coqpit, optimizer_idx: int):
     def _compute_grad_norm(self, optimizer: torch.optim.Optimizer):
         return torch.norm(torch.cat([param.grad.view(-1) for param in self.master_params(optimizer)], dim=0), p=2)
 
-    def _grad_clipping(self, grad_clip: float, optimizer: torch.optim.Optimizer, scaler: torch.cuda.amp.GradScaler):
+    def _grad_clipping(self, grad_clip: float, optimizer: torch.optim.Optimizer, scaler: Optional["torch.GradScaler"]):
         """Perform gradient clipping"""
         if grad_clip is not None and grad_clip > 0:
             if scaler:
@@ -966,7 +972,7 @@ def optimize(
         batch: dict,
         model: nn.Module,
         optimizer: torch.optim.Optimizer,
-        scaler: torch.cuda.amp.GradScaler,
+        scaler: "torch.GradScaler",
         criterion: nn.Module,
         scheduler: Union[torch.optim.lr_scheduler._LRScheduler, list, dict],  # pylint: disable=protected-access
         config: Coqpit,

From 981349b1093a3388fcab59b16bdfed2995fe8b5c Mon Sep 17 00:00:00 2001
From: Enno Hermann
Date: Sat, 9 Nov 2024 13:23:38 +0100
Subject: [PATCH 5/7] refactor: simplify parse_argv, coqpit handles it internally

---
 trainer/trainer.py | 14 +-------------
 1 file changed, 1 insertion(+), 13 deletions(-)

diff --git a/trainer/trainer.py b/trainer/trainer.py
index 5d374a4..04affe6 100644
--- a/trainer/trainer.py
+++ b/trainer/trainer.py
@@ -184,7 +184,7 @@ def __init__(  # pylint: disable=dangerous-default-value
 
         if parse_command_line_args:
             # parse command-line arguments to override TrainerArgs()
-            args, coqpit_overrides = self.parse_argv(args)
+            coqpit_overrides = args.parse_known_args(arg_prefix="")
 
             # get ready for training and parse command-line arguments to override the model config
             config, new_fields = self.init_training(args, coqpit_overrides, config)
@@ -456,18 +456,6 @@ def save_training_script(self) -> None:
                 self.dashboard_logger.add_text("training-script", f"{f.read()}", 0)
             shutil.copyfile(file_path, os.path.join(self.output_path, file_name))
 
-    @staticmethod
-    def parse_argv(args: Union[Coqpit, list]):
-        """Parse command line arguments to init or override `TrainerArgs()`."""
-        if isinstance(args, Coqpit):
-            parser = args.init_argparse(arg_prefix="")
-        else:
-            train_config = TrainerArgs()
-            parser = train_config.init_argparse(arg_prefix="")
-        training_args, coqpit_overrides = parser.parse_known_args()
-        args.parse_args(training_args)
-        return args, coqpit_overrides
-
     @staticmethod
     def init_loggers(config: "Coqpit", output_path: str, dashboard_logger=None, c_logger=None):
         """Init console and dashboard loggers.
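
A minimal sketch of the Coqpit call that replaces parse_argv in the patch
above (illustrative only; TrainerArgs as defined in trainer.config):

    from trainer.config import TrainerArgs

    args = TrainerArgs()
    # Coqpit parses the flags it recognizes from sys.argv in place
    # (e.g. --continue_path) and returns the leftover arguments, which
    # the Trainer then applies as model config overrides.
    coqpit_overrides = args.parse_known_args(arg_prefix="")
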
From ebc6fa8dd75b493442582758bb43d714a5366cee Mon Sep 17 00:00:00 2001
From: Enno Hermann
Date: Mon, 11 Nov 2024 15:11:20 +0100
Subject: [PATCH 6/7] ci: update uv and move into composite action

---
 .github/actions/setup-uv/action.yml | 10 ++++++++++
 .github/workflows/pypi-release.yml  |  8 ++------
 .github/workflows/style_check.yml   |  8 ++------
 .github/workflows/tests.yml         |  8 ++------
 4 files changed, 16 insertions(+), 18 deletions(-)
 create mode 100644 .github/actions/setup-uv/action.yml

diff --git a/.github/actions/setup-uv/action.yml b/.github/actions/setup-uv/action.yml
new file mode 100644
index 0000000..a4f3f6d
--- /dev/null
+++ b/.github/actions/setup-uv/action.yml
@@ -0,0 +1,10 @@
+name: Setup uv
+runs:
+  using: 'composite'
+  steps:
+    - name: Install uv
+      uses: astral-sh/setup-uv@v3
+      with:
+        version: "0.5.1"
+        enable-cache: true
+        cache-dependency-glob: "**/pyproject.toml"
diff --git a/.github/workflows/pypi-release.yml b/.github/workflows/pypi-release.yml
index f5b382a..d139cb9 100644
--- a/.github/workflows/pypi-release.yml
+++ b/.github/workflows/pypi-release.yml
@@ -19,12 +19,8 @@ jobs:
           if [[ "v$version" != "$tag" ]]; then
             exit 1
           fi
-      - name: Install uv
-        uses: astral-sh/setup-uv@v3
-        with:
-          version: "0.4.27"
-          enable-cache: true
-          cache-dependency-glob: "**/pyproject.toml"
+      - name: Setup uv
+        uses: ./.github/actions/setup-uv
       - name: Set up Python
         run: uv python install 3.12
       - name: Build sdist and wheel
diff --git a/.github/workflows/style_check.yml b/.github/workflows/style_check.yml
index a146213..44f562d 100644
--- a/.github/workflows/style_check.yml
+++ b/.github/workflows/style_check.yml
@@ -15,12 +15,8 @@ jobs:
         python-version: [3.9]
     steps:
       - uses: actions/checkout@v4
-      - name: Install uv
-        uses: astral-sh/setup-uv@v3
-        with:
-          version: "0.4.27"
-          enable-cache: true
-          cache-dependency-glob: "**/pyproject.toml"
+      - name: Setup uv
+        uses: ./.github/actions/setup-uv
       - name: Set up Python ${{ matrix.python-version }}
         run: uv python install ${{ matrix.python-version }}
       - name: Lint check
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index c3199f6..4b890c0 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -16,12 +16,8 @@ jobs:
         uv-resolution: ["lowest-direct", "highest"]
     steps:
      - uses: actions/checkout@v4
-      - name: Install uv
-        uses: astral-sh/setup-uv@v3
-        with:
-          version: "0.4.27"
-          enable-cache: true
-          cache-dependency-glob: "**/pyproject.toml"
+      - name: Setup uv
+        uses: ./.github/actions/setup-uv
       - name: Set up Python ${{ matrix.python-version }}
         run: uv python install ${{ matrix.python-version }}
       - name: Unit tests

From 3d14f34d2eac6e4658b4438864300aabca8dc26f Mon Sep 17 00:00:00 2001
From: Enno Hermann
Date: Wed, 13 Nov 2024 17:29:09 +0100
Subject: [PATCH 7/7] chore: update version to 0.1.7 [ci skip]

---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index 50c9751..8872993 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -7,7 +7,7 @@ include = ["trainer*"]
 
 [project]
 name = "coqui-tts-trainer"
-version = "0.1.6"
+version = "0.1.7"
 description = "General purpose model trainer for PyTorch that is more flexible than it should be, by 🐸Coqui."
 readme = "README.md"
 requires-python = ">=3.9, <3.13"