forked from Obsir/semantic-segmentation-framework-pytorch
-
Notifications
You must be signed in to change notification settings - Fork 0
/
build.py
61 lines (54 loc) · 1.85 KB
/
build.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
import albumentations as albu
class Transforms(object):
    """Factory for albumentations pipelines used by the segmentation framework."""

    @staticmethod
    def build_transforms(cfg, split='train'):
        """Build an albumentations pipeline for the given split.

        Args:
            cfg: config object; only cfg.INPUT.SIZE (square side length) is read.
            split (str): 'train' enables random augmentation; any other value
                returns a deterministic resize-only pipeline.

        Returns:
            albumentations.Compose
        """
        if split == 'train':
            # Random augmentations only — Resize is deliberately NOT in this
            # list. The original code wrapped everything (including Resize) in
            # Compose(p=0.6), so ~40% of images skipped resizing and did not
            # match cfg.INPUT.SIZE, breaking fixed-size batching.
            augmentations = [
                # albu.HorizontalFlip(p=0.5),
                albu.OneOf(
                    [
                        # albu.RandomRotate90(p=1),
                        albu.Rotate(p=1, limit=(-15, 15)),
                    ],
                    p=0.5,
                ),
                albu.GaussNoise(p=0.5),
                albu.OneOf(
                    [
                        # albu.CLAHE(p=1),
                        albu.RandomBrightnessContrast(p=1),
                    ],
                    p=0.9,
                ),
                albu.OneOf(
                    [
                        # NOTE(review): IAASharpen was removed in
                        # albumentations >= 1.0 (renamed to albu.Sharpen) —
                        # confirm the pinned version before upgrading.
                        albu.IAASharpen(p=1),
                        albu.Blur(p=1),
                        albu.MedianBlur(p=1),
                    ],
                    p=0.9,
                ),
            ]
            return albu.Compose([
                # Always resize so every sample has the expected shape.
                albu.Resize(cfg.INPUT.SIZE, cfg.INPUT.SIZE),
                # Keep the original 0.6 overall augmentation probability,
                # but apply it only to the random augmentations.
                albu.Compose(augmentations, p=0.6),
            ])
        # Validation / test: deterministic resize only.
        return albu.Compose([albu.Resize(cfg.INPUT.SIZE, cfg.INPUT.SIZE)])

    @staticmethod
    def to_tensor(x, **kwargs):
        """Convert an HWC array to a CHW float32 array (PyTorch layout).

        Used as the target of albu.Lambda, which passes extra keyword
        arguments — hence the ignored **kwargs.
        """
        return x.transpose(2, 0, 1).astype('float32')

    @staticmethod
    def get_preprocessing(preprocessing_fn):
        """Construct preprocessing transform.

        Args:
            preprocessing_fn (callable): data normalization function
                (can be specific for each pretrained neural network)

        Returns:
            albumentations.Compose: applies preprocessing_fn to the image,
            then converts both image and mask to CHW float32.
        """
        return albu.Compose([
            albu.Lambda(image=preprocessing_fn),
            albu.Lambda(image=Transforms.to_tensor, mask=Transforms.to_tensor),
        ])