ci/lint: update & fixing #36

Merged: 3 commits, Sep 17, 2024
6 changes: 3 additions & 3 deletions .github/ISSUE_TEMPLATE/bug_report.md
@@ -15,9 +15,9 @@ assignees: ''
Steps to reproduce the behavior:

1. Go to '...'
1. Run '....'
1. Scroll down to '....'
1. See error
2. Run '....'
3. Scroll down to '....'
4. See error

<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->

34 changes: 6 additions & 28 deletions .pre-commit-config.yaml
@@ -1,5 +1,5 @@
default_language_version:
python: python3.8
python: python3

ci:
autofix_prs: true
@@ -14,52 +14,30 @@ repos:
- id: end-of-file-fixer
- id: trailing-whitespace
- id: check-case-conflict
- id: check-json
- id: check-yaml
- id: check-toml
#- id: pretty-format-json
- id: check-added-large-files
exclude: .*\.ipynb
args: ['--maxkb=250', '--enforce-all']
- id: check-docstring-first
- id: detect-private-key

- repo: https://github.com/asottile/pyupgrade
rev: v3.16.0
hooks:
- id: pyupgrade
args: [--py37-plus]
name: Upgrade code

- repo: https://github.com/PyCQA/docformatter
rev: v1.7.5
hooks:
- id: docformatter
args: [--in-place, --wrap-summaries=115, --wrap-descriptions=120]

- repo: https://github.com/omnilib/ufmt
rev: v2.7.0
hooks:
- id: ufmt
additional_dependencies:
- black == 22.3.0
- usort == 1.0.2

- repo: https://github.com/executablebooks/mdformat
rev: 0.7.17
hooks:
- id: mdformat
args: ['--number']
additional_dependencies:
- mdformat-gfm
- mdformat-black
- mdformat_frontmatter

- repo: https://github.com/asottile/yesqa
rev: v1.5.0
hooks:
- id: yesqa

- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.5.0
hooks:
# try to fix what is possible
- id: ruff
args: ["--fix"]
# perform formatting updates
- id: ruff-format
1 change: 0 additions & 1 deletion README.md
@@ -1,7 +1,6 @@
# Kaggle: Image classification challenges

![CI complete testing](https://github.com/Borda/kaggle_image-classify/workflows/CI%20complete%20testing/badge.svg?branch=main&event=push)
[![Code formatting](https://github.com/Borda/kaggle_image-classify/actions/workflows/code-format.yml/badge.svg?branch=main&event=push)](https://github.com/Borda/kaggle_image-classify/actions/workflows/code-format.yml)
[![codecov](https://codecov.io/gh/Borda/kaggle_image-classify/branch/main/graph/badge.svg?token=5t1Aj5BIyS)](https://codecov.io/gh/Borda/kaggle_image-classify)
[![pre-commit.ci status](https://results.pre-commit.ci/badge/github/Borda/kaggle_image-classify/main.svg)](https://results.pre-commit.ci/latest/github/Borda/kaggle_image-classify/main)

40 changes: 18 additions & 22 deletions kaggle_imgclassif/cassava/data.py
@@ -10,28 +10,24 @@
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms as T

TRAIN_TRANSFORM = T.Compose(
[
T.Resize(512),
T.RandomPerspective(),
T.RandomResizedCrop(224),
T.RandomHorizontalFlip(),
T.RandomVerticalFlip(),
T.ToTensor(),
# T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
T.Normalize([0.431, 0.498, 0.313], [0.237, 0.239, 0.227]),
]
)

VALID_TRANSFORM = T.Compose(
[
T.Resize(256),
T.CenterCrop(224),
T.ToTensor(),
# T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
T.Normalize([0.431, 0.498, 0.313], [0.237, 0.239, 0.227]),
]
)
TRAIN_TRANSFORM = T.Compose([
T.Resize(512),
T.RandomPerspective(),
T.RandomResizedCrop(224),
T.RandomHorizontalFlip(),
T.RandomVerticalFlip(),
T.ToTensor(),
# T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
T.Normalize([0.431, 0.498, 0.313], [0.237, 0.239, 0.227]),
])

VALID_TRANSFORM = T.Compose([
T.Resize(256),
T.CenterCrop(224),
T.ToTensor(),
# T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
T.Normalize([0.431, 0.498, 0.313], [0.237, 0.239, 0.227]),
])


class CassavaDataset(Dataset):
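
The transform change above is purely cosmetic: the list passed to T.Compose is inlined, but the pipelines themselves are unchanged. For readers unfamiliar with the pattern, here is a minimal, hypothetical sketch of how such a torchvision pipeline is applied per sample inside a Dataset; the class name and the (path, label) layout are illustrative, not the actual CassavaDataset:

```python
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms as T

# Illustrative pipeline mirroring TRAIN_TRANSFORM above (not the full augmentation list).
TRAIN_TRANSFORM = T.Compose([
    T.Resize(512),
    T.RandomResizedCrop(224),
    T.ToTensor(),
    T.Normalize([0.431, 0.498, 0.313], [0.237, 0.239, 0.227]),
])


class TinyImageDataset(Dataset):
    """Hypothetical stand-in for CassavaDataset, just to show where the transform runs."""

    def __init__(self, samples, transform=TRAIN_TRANSFORM):
        # samples: list of (image_path, label) tuples -- an assumed layout
        self.samples = samples
        self.transform = transform

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        path, label = self.samples[idx]
        img = Image.open(path).convert("RGB")
        # The Compose pipeline is applied lazily per item, so random
        # augmentations are re-drawn every epoch.
        return self.transform(img), label
```
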
41 changes: 19 additions & 22 deletions kaggle_imgclassif/imet_collect/data.py
@@ -9,7 +9,7 @@
import pandas as pd
import torch
import tqdm
from joblib import delayed, Parallel
from joblib import Parallel, delayed
from PIL import Image
from pytorch_lightning import LightningDataModule
from torch import Tensor
@@ -24,28 +24,24 @@
# ImageFile.LOAD_TRUNCATED_IMAGES = True

#: default training augmentation
TORCHVISION_TRAIN_TRANSFORM = T.Compose(
[
T.Resize(size=256, interpolation=Image.BILINEAR),
T.RandomRotation(degrees=25),
T.RandomPerspective(distortion_scale=0.2),
T.RandomResizedCrop(size=224),
# T.RandomHorizontalFlip(p=0.5),
T.RandomVerticalFlip(p=0.5),
# T.ColorJitter(brightness=0.05, contrast=0.05, saturation=0.05, hue=0.05),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
TORCHVISION_TRAIN_TRANSFORM = T.Compose([
T.Resize(size=256, interpolation=Image.BILINEAR),
T.RandomRotation(degrees=25),
T.RandomPerspective(distortion_scale=0.2),
T.RandomResizedCrop(size=224),
# T.RandomHorizontalFlip(p=0.5),
T.RandomVerticalFlip(p=0.5),
# T.ColorJitter(brightness=0.05, contrast=0.05, saturation=0.05, hue=0.05),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
#: default validation augmentation
TORCHVISION_VALID_TRANSFORM = T.Compose(
[
T.Resize(size=256, interpolation=Image.BILINEAR),
T.CenterCrop(size=224),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
TORCHVISION_VALID_TRANSFORM = T.Compose([
T.Resize(size=256, interpolation=Image.BILINEAR),
T.CenterCrop(size=224),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])


def load_image(path_img: str) -> Image.Image:
@@ -334,3 +330,4 @@ def test_dataloader(self) -> Optional[DataLoader]:
shuffle=False,
)
logging.warning("no testing images found")
return None
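
Besides the import reordering and the inlined Compose lists, this file gains an explicit return None at the end of test_dataloader, matching its Optional[DataLoader] annotation; previously the None was implicit in falling off the end of the function after the warning. A rough sketch of the pattern, with a hypothetical test_dataset attribute standing in for the module's real fields:

```python
import logging
from typing import Optional

from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader


class SketchDataModule(LightningDataModule):
    """Illustrative only: shows the explicit `return None` fall-through path."""

    def __init__(self, test_dataset=None, batch_size: int = 32):
        super().__init__()
        self.test_dataset = test_dataset  # assumed attribute name, not the real one
        self.batch_size = batch_size

    def test_dataloader(self) -> Optional[DataLoader]:
        if self.test_dataset is not None:
            return DataLoader(self.test_dataset, batch_size=self.batch_size, shuffle=False)
        # No test images available: warn and return None explicitly,
        # which is what the added line in the diff makes visible.
        logging.warning("no testing images found")
        return None
```
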
2 changes: 1 addition & 1 deletion kaggle_imgclassif/imet_collect/models.py
@@ -3,7 +3,7 @@
import timm
import torch
from pytorch_lightning import LightningModule
from torch import nn, Tensor
from torch import Tensor, nn
from torch.nn import functional as F
from torchmetrics import Accuracy, F1Score, Precision

43 changes: 20 additions & 23 deletions kaggle_imgclassif/plant_pathology/augment.py
@@ -1,4 +1,5 @@
"""Module to perform efficient preprocess and data augmentation."""

from typing import Tuple

import numpy as np
@@ -18,30 +19,26 @@
from kaggle_imgclassif.plant_pathology import DATASET_IMAGE_MEAN, DATASET_IMAGE_STD

#: default training augmentation
TORCHVISION_TRAIN_TRANSFORM = T.Compose(
[
T.Resize(size=512, interpolation=Image.BILINEAR),
T.RandomRotation(degrees=30),
T.RandomPerspective(distortion_scale=0.4),
T.RandomResizedCrop(size=224),
T.RandomHorizontalFlip(p=0.5),
T.RandomVerticalFlip(p=0.5),
# T.ColorJitter(brightness=0.05, contrast=0.05, saturation=0.05, hue=0.05),
T.ToTensor(),
# T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
T.Normalize(DATASET_IMAGE_MEAN, DATASET_IMAGE_STD), # custom
]
)
TORCHVISION_TRAIN_TRANSFORM = T.Compose([
T.Resize(size=512, interpolation=Image.BILINEAR),
T.RandomRotation(degrees=30),
T.RandomPerspective(distortion_scale=0.4),
T.RandomResizedCrop(size=224),
T.RandomHorizontalFlip(p=0.5),
T.RandomVerticalFlip(p=0.5),
# T.ColorJitter(brightness=0.05, contrast=0.05, saturation=0.05, hue=0.05),
T.ToTensor(),
# T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
T.Normalize(DATASET_IMAGE_MEAN, DATASET_IMAGE_STD), # custom
])
#: default validation augmentation
TORCHVISION_VALID_TRANSFORM = T.Compose(
[
T.Resize(size=256, interpolation=Image.BILINEAR),
T.CenterCrop(size=224),
T.ToTensor(),
# T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
T.Normalize(DATASET_IMAGE_MEAN, DATASET_IMAGE_STD), # custom
]
)
TORCHVISION_VALID_TRANSFORM = T.Compose([
T.Resize(size=256, interpolation=Image.BILINEAR),
T.CenterCrop(size=224),
T.ToTensor(),
# T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
T.Normalize(DATASET_IMAGE_MEAN, DATASET_IMAGE_STD), # custom
])


class Resize(nn.Module):
4 changes: 3 additions & 1 deletion kaggle_imgclassif/plant_pathology/data.py
@@ -197,7 +197,8 @@ def prepare_data(self):

@property
def num_classes(self) -> int:
assert self.train_dataset and self.valid_dataset
assert self.train_dataset
assert self.valid_dataset
return max(self.train_dataset.num_classes, self.valid_dataset.num_classes)

@staticmethod
@@ -316,3 +317,4 @@ def test_dataloader(self) -> Optional[DataLoader]:
shuffle=False,
)
logging.warning("no testing images found")
return None
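
Two small, behavior-preserving changes here: the combined assert self.train_dataset and self.valid_dataset is split into two statements, so a failure points at exactly which dataset is missing, and test_dataloader gets the same explicit return None as above. A minimal sketch of the resulting property, with hypothetical scaffolding around the field names taken from the diff:

```python
class SketchPlantPathologyDM:
    """Illustrative stand-in; the real class is a LightningDataModule."""

    def __init__(self, train_dataset=None, valid_dataset=None):
        self.train_dataset = train_dataset  # each dataset is expected to expose .num_classes
        self.valid_dataset = valid_dataset

    @property
    def num_classes(self) -> int:
        # Separate asserts: the failing line in the traceback now names
        # the exact missing dataset instead of a combined condition.
        assert self.train_dataset
        assert self.valid_dataset
        return max(self.train_dataset.num_classes, self.valid_dataset.num_classes)
```
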
2 changes: 1 addition & 1 deletion kaggle_imgclassif/plant_pathology/models.py
@@ -3,7 +3,7 @@
import timm
import torch
from pytorch_lightning import LightningModule
from torch import nn, Tensor
from torch import Tensor, nn
from torch.nn import functional as F
from torchmetrics import Accuracy, F1Score, Precision

106 changes: 59 additions & 47 deletions pyproject.toml
@@ -4,57 +4,69 @@ requires = [
"wheel",
]

[tool.black]
# https://github.com/psf/black
line-length = 120
exclude = "(.eggs|.git|.hg|.mypy_cache|.venv|_build|buck-out|build|dist)"
[tool.pytest.ini_options]
norecursedirs = [
".git",
".github",
"dist",
"build",
"docs",
]
addopts = [
"--strict-markers",
"--doctest-modules",
"--color=yes",
"--disable-pytest-warnings",
]
filterwarnings = [
"error::FutureWarning",
]
xfail_strict = true
junit_duration_report = "call"

[tool.usort]
known_first_party = [
"kaggle_imgclassif",
[tool.coverage.report]
exclude_lines = [
"pragma: no cover",
"pass",
]
skip_glob = []
profile = "black"
line_length = 120

[tool.ruff]
target-version = "py38"
line-length = 120
# Enable Pyflakes `E` and `F` codes by default.
select = [
"E", "W", # see: https://pypi.org/project/pycodestyle
"F", # see: https://pypi.org/project/pyflakes
# "D", # see: https://pypi.org/project/pydocstyle
# "N", # see: https://pypi.org/project/pep8-naming
]
#extend-select = [
# "C4", # see: https://pypi.org/project/flake8-comprehensions
# "PT", # see: https://pypi.org/project/flake8-pytest-style
# "RET", # see: https://pypi.org/project/flake8-return
# "SIM", # see: https://pypi.org/project/flake8-simplify
#]
ignore = [
"E731", # Do not assign a lambda expression, use a def
]
# Exclude a variety of commonly ignored directories.
exclude = [
".eggs",
".git",
".ruff_cache",
"__pypackages__",
"_build",
"build",
"dist",
"docs"
]
ignore-init-module-imports = true

[tool.ruff.pydocstyle]
# Use Google-style docstrings.
convention = "google"

#[tool.ruff.pycodestyle]
#ignore-overlong-task-comments = true

[tool.ruff.mccabe]
# Unlike Flake8, default to a complexity level of 10.
max-complexity = 10
lint.mccabe.max-complexity = 10
# Use Google-style docstrings.
lint.pydocstyle.convention = "google"
format.preview = true
lint.select = [
"E",
"F", # see: https://pypi.org/project/pyflakes
"I", #see: https://pypi.org/project/isort
"S", # see: https://pypi.org/project/flake8-bandit
"UP", # see: https://docs.astral.sh/ruff/rules/#pyupgrade-up
# "D", # see: https://pypi.org/project/pydocstyle
"W", # see: https://pypi.org/project/pycodestyle
]
lint.extend-select = [
# "C4", # see: https://pypi.org/project/flake8-comprehensions
"PLE", # see: https://pypi.org/project/pylint/
"PT", # see: https://pypi.org/project/flake8-pytest-style
"RET", # see: https://pypi.org/project/flake8-return
"RUF100", # Ralternative to yesqa
"SIM", # see: https://pypi.org/project/flake8-simplify
]
lint.ignore = [
"S101", # todo: Use of `assert` detected
]
lint.unfixable = [
"F401",
]
[tool.ruff.lint.per-file-ignores]
"setup.py" = ["D100", "SIM115"]
"scripts_*/**" = [
"S", "D"
]
"tests/**" = [
"S", "D"
]
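
In the new pyproject.toml, the ruff options use dotted keys such as lint.select, lint.ignore, and format.preview under [tool.ruff]; TOML parses these into nested tables, so tooling reads them as tool.ruff.lint.select and so on. A quick, optional sanity check of that nesting, assuming Python 3.11+ for the standard-library tomllib and that the file is read from the repository root:

```python
import tomllib  # stdlib in Python 3.11+; older interpreters can use the `tomli` package

with open("pyproject.toml", "rb") as fh:
    cfg = tomllib.load(fh)

ruff = cfg["tool"]["ruff"]
# Dotted keys like `lint.select = [...]` become nested tables when parsed.
print(sorted(ruff))                      # top-level ruff keys, e.g. 'format', 'lint', ...
print(ruff["lint"]["select"])            # ['E', 'F', 'I', 'S', 'UP', 'W']
print(ruff["lint"]["per-file-ignores"])  # keys: 'setup.py', 'scripts_*/**', 'tests/**'
```

This only inspects the config shape; ruff itself runs through the two pre-commit hooks (ruff and ruff-format) configured earlier in this PR.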