Skip to content

Commit

Permalink
Add finetune colab, better flood fill
Browse files Browse the repository at this point in the history
  • Loading branch information
xiazeyu committed May 19, 2023
1 parent e49eb2b commit 41106fc
Show file tree
Hide file tree
Showing 21 changed files with 6,276 additions and 22 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -26,3 +26,4 @@ Segmentation_Model_Output
1_Detection_Model/runs/
Baseline_2_Model_Output/
Models/
FineTuneDataset/
42 changes: 22 additions & 20 deletions 0_Labelling_Tools/1_Segmentation_Label_Flood_Fill.py
Original file line number Diff line number Diff line change
def parse_args():
    """Parse command-line arguments for the flood-fill label converter.

    Returns:
        argparse.Namespace with:
            input_dir: input grey-scale label directory produced by EISeg.
            output_dir: output YOLOv5 bounding box label directory.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # NOTE: the pasted diff showed both the old and new help text for
    # 'input_dir'; the argument is registered exactly once, with the
    # committed (grey-scale) wording.
    parser.add_argument(
        'input_dir', help='input grey-scale label directory produced by EISeg')
    parser.add_argument(
        'output_dir', help='output YOLOv5 bounding box label directory')
    return parser.parse_args()


def flood_fill(x, y, bx, ex, by, ey, grid=None):
    """Iteratively flood-fill the 4-connected component containing (x, y).

    Every reachable cell equal to 1 is consumed (set to 0) and folded into
    a bounding box expressed as half-open ranges: rows [bx, ex) and
    columns [by, ey) — matching the original code's ``new_x + 1`` /
    ``new_y + 1`` end updates.

    Args:
        x, y: coordinates of the seed cell (already consumed by the caller).
        bx, ex, by, ey: initial bounding box — presumably the seed cell's
            own half-open box (x, x + 1, y, y + 1); caller not visible here,
            TODO confirm.
        grid: 2-D 0/1 label array to fill in place. Defaults to the module
            global ``np_data`` for backward compatibility with existing
            callers.

    Returns:
        Tuple (bx, ex, by, ey) covering every cell of the component.
    """
    global np_data
    if grid is None:
        grid = np_data

    # Explicit stack instead of recursion: large connected regions would
    # otherwise overflow Python's recursion limit.
    stack = [(x, y)]
    while stack:
        cx, cy = stack.pop()
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nx = cx + dx
            ny = cy + dy

            if nx < 0 or nx >= grid.shape[0] or ny < 0 or ny >= grid.shape[1]:
                continue
            if grid[nx][ny] != 1:
                continue

            grid[nx][ny] = 0  # mark consumed so each cell is visited once
            stack.append((nx, ny))

            # Grow the half-open box uniformly for every newly visited
            # cell. (The previous per-direction offset table used
            # new_x + 1 / new_y + 1 as the *begin* candidate for -x / -y
            # steps, so bx/by never shrank to cover cells reached by a
            # negative-direction move — the box could lose a row/column.)
            bx = min(bx, nx)
            ex = max(ex, nx + 1)
            by = min(by, ny)
            ey = max(ey, ny + 1)

    return bx, ex, by, ey

Expand All @@ -57,7 +60,6 @@ def segmentation_label_flood_fill(args):

data = Image.open(filename)
data = data.convert('L')
data.show()
np_data = np.array(data)
np_data[np_data != 0] = 1

Expand Down
8 changes: 8 additions & 0 deletions 1_Detection_Model/data/dtsegnet_finetune.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
# YOLOv5 dataset config for fine-tuning the detection model.
path: ../FineTuneDataset  # dataset root (relative to the training working dir)
train: images # train images (relative to 'path')
val: images # val images (relative to 'path')
test: images # test images (relative to 'path')

# Classes
nc: 1 # number of classes
names: ['precipitate'] # class names

# NOTE(review): train/val/test all point at the same 'images' folder, so
# there is no held-out split — metrics reported during fine-tuning are
# training-set metrics.
69 changes: 69 additions & 0 deletions 3_Segmentation_Model/configs/dtsegnet/finetune.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
# PaddleSeg fine-tuning config for the DT-SegNet segmentation model
# (SegFormer with MixVisionTransformer-B1 backbone), starting from the
# released checkpoint in Models/segmentation.pdparams.
_base_: '../_base_/dtsegnet.yml'

batch_size: 1
iters: 2000  # short schedule — fine-tuning only, not training from scratch

train_dataset:
  type: Dataset
  dataset_root: /content/DT_SegNet/FineTuneDataset/Segmentation_Dataset
  train_path: /content/DT_SegNet/FineTuneDataset/Segmentation_Dataset/train.txt
  num_classes: 2  # background + precipitate
  transforms:
    - type: Resize
      target_size: [512, 512]
    - type: RandomHorizontalFlip
    - type: RandomVerticalFlip
    - type: Normalize
  mode: train

# NOTE(review): val_dataset and test_dataset both reuse train.txt, so
# evaluation during fine-tuning runs on the training images themselves.
val_dataset:
  type: Dataset
  dataset_root: /content/DT_SegNet/FineTuneDataset/Segmentation_Dataset
  val_path: /content/DT_SegNet/FineTuneDataset/Segmentation_Dataset/train.txt
  num_classes: 2
  transforms:
    - type: Resize
      target_size: [512, 512]
    - type: Normalize
  mode: val

test_dataset:
  type: Dataset
  dataset_root: /content/DT_SegNet/FineTuneDataset/Segmentation_Dataset
  val_path: /content/DT_SegNet/FineTuneDataset/Segmentation_Dataset/train.txt  # NOTE(review): key is val_path (not test_path) and mode is val — confirm intended
  num_classes: 2
  transforms:
    - type: Resize
      target_size: [512, 512]
    - type: Normalize
  mode: val

model:
  type: SegFormer
  backbone:
    type: MixVisionTransformer_B1
  embedding_dim: 256
  num_classes: 2
  pretrained: /content/DT_SegNet/Models/segmentation.pdparams  # checkpoint to fine-tune from

optimizer:
  _inherited_: False  # presumably replaces (rather than merges with) the base config's optimizer — TODO confirm PaddleSeg semantics
  type: AdamW
  beta1: 0.9
  beta2: 0.999
  weight_decay: 0.01

lr_scheduler:
  type: PolynomialDecay
  learning_rate: 0.00006  # small LR, typical for fine-tuning
  power: 1

loss:
  types:
    - type: CrossEntropyLoss
  coef: [1]

test_config:
  is_slide: True  # sliding-window inference over large images
  crop_size: [1024, 1024]
  stride: [768, 768]  # < crop_size, so adjacent windows overlap
Loading

0 comments on commit 41106fc

Please sign in to comment.