This repository has been archived by the owner on Nov 16, 2023. It is now read-only.

fixes after removing forking workflow (#322)
* PR to resolve merge issues

* updated main build as well

* added ability to read in git branch name directly (see the sketch after the change summary below)

* manually updated the other files
maxkazmsft committed May 27, 2020
1 parent 514a2b3 commit c0e20e3
Showing 8 changed files with 282 additions and 65 deletions.
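The third bullet above refers to the git_branch() helper that test.py and train.py import from cv_lib.segmentation.dutchf3.utils; its implementation is not part of this diff. Purely as an illustration, a minimal sketch of a helper that reads the branch name directly might look like the following, where the BUILD_SOURCEBRANCHNAME environment variable is an assumption (an Azure DevOps-style pipeline variable) and the subprocess call is a generic fallback:

import os
import subprocess


def git_branch():
    # Hypothetical sketch; the real helper lives in cv_lib.segmentation.dutchf3.utils
    # and may differ. A CI pipeline can hand the branch name over directly via an
    # environment variable (BUILD_SOURCEBRANCHNAME is an assumed name here).
    branch = os.environ.get("BUILD_SOURCEBRANCHNAME")
    if branch:
        return branch
    # Otherwise ask git for the currently checked-out branch.
    return (
        subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"])
        .decode("utf-8")
        .strip()
    )

Checking the environment first keeps the value meaningful in CI checkouts where HEAD is detached and git rev-parse --abbrev-ref HEAD would only return "HEAD".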
2 changes: 2 additions & 0 deletions environment/anaconda/local/environment.yml
@@ -38,3 +38,5 @@ dependencies:
- scipy==1.1.0
- jupytext==1.3.0
- validators
- pyyaml

@@ -21,7 +21,7 @@ DATASET:
MODEL:
NAME: seg_hrnet
IN_CHANNELS: 3
PRETRAINED: "/home/yazeed/seismic-deeplearning/docker/hrnetv2_w48_imagenet_pretrained.pth"
PRETRAINED: "<ImageNet pretrained model path - please see main README>"
EXTRA:
FINAL_CONV_KERNEL: 1
STAGE2:
@@ -93,7 +93,7 @@ VALIDATION:
BATCH_SIZE_PER_GPU: 128

TEST:
MODEL_PATH: "/home/yazeed/seismic-deeplearning/docker/hrnetv2_w48_imagenet_pretrained.pth"
MODEL_PATH: "/data/home/mat/repos/DeepSeismic/experiments/interpretation/dutchf3_patch/local/output/staging/0d1d2bbf9685995a0515ca1d9de90f9bcec0db90/seg_hrnet/Dec20_233535/models/seg_hrnet_running_model_33.pth"
TEST_STRIDE: 10
SPLIT: 'Both' # Can be Both, Test1, Test2
INLINE: True
@@ -31,14 +31,14 @@ TRAIN:
AUGMENTATION: True
DEPTH: "none" # Options are none, patch, and section
STRIDE: 50
PATCH_SIZE: 99
PATCH_SIZE: 100
AUGMENTATIONS:
RESIZE:
HEIGHT: 99
WIDTH: 99
HEIGHT: 100
WIDTH: 100
PAD:
HEIGHT: 99
WIDTH: 99
HEIGHT: 100
WIDTH: 100
MEAN: 0.0009997 # 0.0009996710808862074
STD: 0.20977 # 0.20976548783479299
MODEL_DIR: "models"
@@ -53,5 +53,5 @@ TEST:
INLINE: True
CROSSLINE: True
POST_PROCESSING:
SIZE: 99
SIZE: 100
CROP_PIXELS: 0 # Number of pixels to crop top, bottom, left and right
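Since pyyaml is added to environment.yml in this commit and the files above are plain YAML, a quick way to sanity-check edited values such as TRAIN.PATCH_SIZE is to load the file directly. The config path below is a placeholder, and the experiment scripts themselves go through default.py / update_config (see the test.py diff below) rather than raw yaml loading:

import yaml

# Placeholder path - substitute the dutchf3_patch config file you edited.
config_path = "experiments/interpretation/dutchf3_patch/local/configs/hrnet.yaml"

with open(config_path) as f:
    cfg = yaml.safe_load(f)

# These keys mirror the TRAIN block shown in the diff above.
print(cfg["TRAIN"]["PATCH_SIZE"])
print(cfg["TRAIN"]["AUGMENTATIONS"]["RESIZE"])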
31 changes: 26 additions & 5 deletions experiments/interpretation/dutchf3_patch/local/test.py
@@ -30,7 +30,7 @@
from cv_lib.segmentation import models
from cv_lib.segmentation.dutchf3.utils import current_datetime, git_branch, git_hash

from cv_lib.utils import load_log_configuration, mask_to_disk, generate_path
from cv_lib.utils import load_log_configuration, mask_to_disk, generate_path, image_to_disk
from deepseismic_interpretation.dutchf3.data import add_patch_depth_channels, get_test_loader
from default import _C as config
from default import update_config
@@ -201,7 +201,7 @@ def _output_processing_pipeline(config, output):


def _patch_label_2d(
model, img, pre_processing, output_processing, patch_size, stride, batch_size, device, num_classes,
model, img, pre_processing, output_processing, patch_size, stride, batch_size, device, num_classes, split, debug
):
"""Processes a whole section
"""
@@ -221,10 +221,26 @@ def _patch_label_2d(
)

model_output = model(batch.to(device))

for (hdx, wdx), output in zip(batch_indexes, model_output.detach().cpu()):
output = output_processing(output)
output_p[:, :, hdx + ps : hdx + ps + patch_size, wdx + ps : wdx + ps + patch_size,] += output

# dump the data right before it's being put into the model and after scoring
if debug:
outdir = f"debug/batch_{split}"
generate_path(outdir)
for i in range(batch.shape[0]):
image_to_disk(
np.array(batch[i, 0, :, :]), f"{outdir}/{batch_indexes[i][0]}_{batch_indexes[i][1]}_img.png"
)
# now dump model predictions
for nclass in range(num_classes):
mask_to_disk(
np.array(model_output[i, nclass, :, :].detach().cpu()),
f"{outdir}/{batch_indexes[i][0]}_{batch_indexes[i][1]}_class_{nclass}_pred.png",
)

# crop the output_p in the middle
output = output_p[:, :, ps:-ps, ps:-ps]
return output
@@ -235,12 +251,14 @@ def _evaluate_split(
logger = logging.getLogger(__name__)

TestSectionLoader = get_test_loader(config)

test_set = TestSectionLoader(
config.DATASET.ROOT,
config.DATASET.NUM_CLASSES,
split=split,
is_transform=True,
augmentations=section_aug
augmentations=section_aug,
debug=debug,
)

n_classes = test_set.n_classes
@@ -253,10 +271,10 @@

try:
output_dir = generate_path(
config.OUTPUT_DIR + "_test", git_branch(), git_hash(), config.MODEL.NAME, current_datetime(),
f"debug/{config.OUTPUT_DIR}_test_{split}", git_branch(), git_hash(), config.MODEL.NAME, current_datetime(),
)
except TypeError:
output_dir = generate_path(config.OUTPUT_DIR + "_test", config.MODEL.NAME, current_datetime(),)
output_dir = generate_path(f"debug/{config.OUTPUT_DIR}_test_{split}", config.MODEL.NAME, current_datetime(),)

running_metrics_split = runningScore(n_classes)

@@ -278,6 +296,8 @@
config.VALIDATION.BATCH_SIZE_PER_GPU,
device,
n_classes,
split,
debug,
)

pred = outputs.detach().max(1)[1].numpy()
@@ -331,6 +351,7 @@ def _write_section_file(labels, section_file):


def test(*options, cfg=None, debug=False):

update_config(config, options=options, config_file=cfg)
n_classes = config.DATASET.NUM_CLASSES

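The test.py changes above add image_to_disk to the existing cv_lib.utils import and use it, together with mask_to_disk, to dump every input patch and per-class prediction when debug is enabled. Those two helpers are not shown in this commit; purely as an illustrative sketch of writers with the call signature used above (2-D array, output path), assuming Pillow is available and a simple grayscale rescale is acceptable:

import numpy as np
from PIL import Image


def _save_grayscale(array, path):
    # Min-max rescale a 2-D array to 0-255 and write it as an 8-bit PNG.
    arr = np.asarray(array, dtype=np.float32)
    rng = float(arr.max() - arr.min())
    scaled = (arr - arr.min()) / rng if rng > 0 else np.zeros_like(arr)
    Image.fromarray((scaled * 255).astype(np.uint8)).save(path)


def image_to_disk(image, path):
    # Save a seismic patch or section for visual inspection.
    _save_grayscale(image, path)


def mask_to_disk(mask, path):
    # Save a label mask or per-class score map the same way; the real helper
    # in cv_lib.utils may apply a class colour map instead.
    _save_grayscale(mask, path)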
10 changes: 4 additions & 6 deletions experiments/interpretation/dutchf3_patch/local/train.py
@@ -73,7 +73,7 @@ def run(*options, cfg=None, debug=False):
config.OUTPUT_DIR, git_branch(), git_hash(), config_file_name, config.TRAIN.MODEL_DIR, current_datetime(),
)
except TypeError:
output_dir = generate_path(config.OUTPUT_DIR, config_file_name, config.TRAIN.MODEL_DIR, current_datetime(),)
output_dir = generate_path(config.OUTPUT_DIR, config_file_name, config.TRAIN.MODEL_DIR, current_datetime(),)

# Logging:
load_log_configuration(config.LOG_CONFIG)
@@ -133,8 +133,7 @@ def run(*options, cfg=None, debug=False):
stride=config.TRAIN.STRIDE,
patch_size=config.TRAIN.PATCH_SIZE,
augmentations=train_aug,
#augmentations=Resize(config.TRAIN.AUGMENTATIONS.RESIZE.HEIGHT, config.TRAIN.AUGMENTATIONS.RESIZE.WIDTH, always_apply=True),
debug=True
debug=True,
)
logger.info(train_set)
n_classes = train_set.n_classes
@@ -146,14 +145,13 @@
stride=config.TRAIN.STRIDE,
patch_size=config.TRAIN.PATCH_SIZE,
augmentations=val_aug,
#augmentations=Resize(config.TRAIN.AUGMENTATIONS.RESIZE.HEIGHT, config.TRAIN.AUGMENTATIONS.RESIZE.WIDTH, always_apply=True),
debug=True
debug=True,
)
logger.info(val_set)

if debug:
logger.info("Running in debug mode..")
train_set = data.Subset(train_set, range(config.TRAIN.BATCH_SIZE_PER_GPU*config.NUM_DEBUG_BATCHES))
train_set = data.Subset(train_set, range(config.TRAIN.BATCH_SIZE_PER_GPU * config.NUM_DEBUG_BATCHES))
val_set = data.Subset(val_set, range(config.VALIDATION.BATCH_SIZE_PER_GPU))

train_loader = data.DataLoader(
70 changes: 66 additions & 4 deletions interpretation/deepseismic_interpretation/dutchf3/data.py
@@ -132,6 +132,7 @@ def __init__(self, data_dir, n_classes, split="train", is_transform=True, augmen
self.augmentations = augmentations
self.n_classes = n_classes
self.sections = list()
self.debug = debug

def __len__(self):
return len(self.sections)
@@ -150,13 +151,25 @@ def __getitem__(self, index):

im, lbl = _transform_WH_to_HW(im), _transform_WH_to_HW(lbl)

if self.debug and "test" in self.split:
outdir = f"debug/sectionLoader_{self.split}_raw"
generate_path(outdir)
image_to_disk(im, f"{outdir}/index_{index}_section_{section_name}_img.png")
mask_to_disk(lbl, f"{outdir}/index_{index}_section_{section_name}_lbl.png")

if self.augmentations is not None:
augmented_dict = self.augmentations(image=im, mask=lbl)
im, lbl = augmented_dict["image"], augmented_dict["mask"]

if self.is_transform:
im, lbl = self.transform(im, lbl)

if self.debug and "test" in self.split:
outdir = f"debug/sectionLoader_{self.split}_{'aug' if self.augmentations is not None else 'noaug'}"
generate_path(outdir)
image_to_disk(np.array(im[0]), f"{outdir}/index_{index}_section_{section_name}_img.png")
mask_to_disk(np.array(lbl[0]), f"{outdir}/index_{index}_section_{section_name}_lbl.png")

return im, lbl

def transform(self, img, lbl):
@@ -390,6 +403,14 @@ def __getitem__(self, index):

im, lbl = _transform_WH_to_HW(im), _transform_WH_to_HW(lbl)

# dump images before augmentation
if self.debug:
outdir = f"debug/testSectionLoaderWithDepth_{self.split}_raw"
generate_path(outdir)
# this needs to take the first dimension of image (no depth) but lbl only has 1 dim
image_to_disk(im[0, :, :], f"{outdir}/index_{index}_section_{section_name}_img.png")
mask_to_disk(lbl, f"{outdir}/index_{index}_section_{section_name}_lbl.png")

if self.augmentations is not None:
im = _transform_CHW_to_HWC(im)
augmented_dict = self.augmentations(image=im, mask=lbl)
@@ -399,6 +420,15 @@
if self.is_transform:
im, lbl = self.transform(im, lbl)

# dump images and labels to disk after augmentation
if self.debug:
outdir = (
f"debug/testSectionLoaderWithDepth_{self.split}_{'aug' if self.augmentations is not None else 'noaug'}"
)
generate_path(outdir)
image_to_disk(np.array(im[0, :, :]), f"{outdir}/index_{index}_section_{section_name}_img.png")
mask_to_disk(np.array(lbl[0, :, :]), f"{outdir}/index_{index}_section_{section_name}_lbl.png")

return im, lbl


@@ -430,7 +460,7 @@ def __init__(
self.patches = list()
self.patch_size = patch_size
self.stride = stride
self.debug=debug
self.debug = debug

def pad_volume(self, volume):
"""
@@ -448,7 +478,7 @@ def __getitem__(self, index):

# Shift offsets the padding that is added in training
# shift = self.patch_size if "test" not in self.split else 0
# Remember we are cancelling the shift since we no longer pad
# Remember we are cancelling the shift since we no longer pad
shift = 0
idx, xdx, ddx = int(idx) + shift, int(xdx) + shift, int(ddx) + shift

@@ -461,6 +491,13 @@ def __getitem__(self, index):

im, lbl = _transform_WH_to_HW(im), _transform_WH_to_HW(lbl)

# dump raw images before augmentation
if self.debug:
outdir = f"debug/patchLoader_{self.split}_raw"
generate_path(outdir)
image_to_disk(im, f"{outdir}/index_{index}_section_{patch_name}_img.png")
mask_to_disk(lbl, f"{outdir}/index_{index}_section_{patch_name}_lbl.png")

if self.augmentations is not None:
augmented_dict = self.augmentations(image=im, mask=lbl)
im, lbl = augmented_dict["image"], augmented_dict["mask"]
@@ -474,6 +511,14 @@

if self.is_transform:
im, lbl = self.transform(im, lbl)

# dump images and labels to disk
if self.debug:
outdir = f"debug/patchLoader_{self.split}_{'aug' if self.augmentations is not None else 'noaug'}"
generate_path(outdir)
image_to_disk(np.array(im[0, :, :]), f"{outdir}/index_{index}_section_{patch_name}_img.png")
mask_to_disk(np.array(lbl[0, :, :]), f"{outdir}/index_{index}_section_{patch_name}_lbl.png")

return im, lbl

def transform(self, img, lbl):
@@ -619,7 +664,7 @@ def __getitem__(self, index):

# Shift offsets the padding that is added in training
# shift = self.patch_size if "test" not in self.split else 0
# Remember we are cancelling the shift since we no longer pad
# Remember we are cancelling the shift since we no longer pad
shift = 0
idx, xdx, ddx = int(idx) + shift, int(xdx) + shift, int(ddx) + shift

@@ -698,7 +743,7 @@ def __getitem__(self, index):

# Shift offsets the padding that is added in training
# shift = self.patch_size if "test" not in self.split else 0
# Remember we are cancelling the shift since we no longer pad
# Remember we are cancelling the shift since we no longer pad
shift = 0
idx, xdx, ddx = int(idx) + shift, int(xdx) + shift, int(ddx) + shift

@@ -712,6 +757,13 @@ def __getitem__(self, index):

im, lbl = _transform_WH_to_HW(im), _transform_WH_to_HW(lbl)

# dump images before augmentation
if self.debug:
outdir = f"debug/patchLoaderWithSectionDepth_{self.split}_raw"
generate_path(outdir)
image_to_disk(im[0, :, :], f"{outdir}/index_{index}_section_{patch_name}_img.png")
mask_to_disk(lbl, f"{outdir}/index_{index}_section_{patch_name}_lbl.png")

if self.augmentations is not None:
im = _transform_CHW_to_HWC(im)
augmented_dict = self.augmentations(image=im, mask=lbl)
@@ -727,6 +779,16 @@ def __getitem__(self, index):

if self.is_transform:
im, lbl = self.transform(im, lbl)

# dump images and labels to disk after augmentation
if self.debug:
outdir = (
f"debug/patchLoaderWithSectionDepth_{self.split}_{'aug' if self.augmentations is not None else 'noaug'}"
)
generate_path(outdir)
image_to_disk(np.array(im[0, :, :]), f"{outdir}/index_{index}_section_{patch_name}_img.png")
mask_to_disk(np.array(lbl[0, :, :]), f"{outdir}/index_{index}_section_{patch_name}_lbl.png")

return im, lbl

def __repr__(self):
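The same debug-dump block, guarded by self.debug and preceded by generate_path(outdir), now appears in each of the loaders above (SectionLoader, TestSectionLoaderWithDepth, PatchLoader, PatchLoaderWithSectionDepth), once on the raw data and once after augmentation and transform. Purely as a sketch of how that repetition could be factored out, assuming generate_path creates the directory and tolerates it already existing, as its repeated use here suggests:

import numpy as np

from cv_lib.utils import generate_path, image_to_disk, mask_to_disk


def dump_debug_pair(image, label, outdir, name):
    # Write one image/label pair to the debug directory as PNGs, using the
    # same helpers the loaders above call inline.
    generate_path(outdir)
    image_to_disk(np.asarray(image), f"{outdir}/{name}_img.png")
    mask_to_disk(np.asarray(label), f"{outdir}/{name}_lbl.png")


# Example, mirroring the pre-augmentation dump in PatchLoader.__getitem__:
# dump_debug_pair(im, lbl, f"debug/patchLoader_{self.split}_raw",
#                 f"index_{index}_section_{patch_name}")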