Modify the code to be more in line with community requirements
	modified:   examples/class_increment_semantic_segmentation/lifelong_learning_bench/testalgorithms/erfnet/ERFNet/basemodel.py
	modified:   examples/class_increment_semantic_segmentation/lifelong_learning_bench/testalgorithms/erfnet/ERFNet/dataloaders/custom_transforms.py
	modified:   examples/class_increment_semantic_segmentation/lifelong_learning_bench/testalgorithms/erfnet/ERFNet/dataloaders/datasets/citylostfound.py
	modified:   examples/class_increment_semantic_segmentation/lifelong_learning_bench/testalgorithms/erfnet/ERFNet/dataloaders/datasets/cityscapes.py
	deleted:    examples/class_increment_semantic_segmentation/lifelong_learning_bench/testalgorithms/erfnet/ERFNet/dataloaders/datasets/temp.txt
	modified:   examples/class_increment_semantic_segmentation/lifelong_learning_bench/testalgorithms/erfnet/ERFNet/mypath.py
	modified:   examples/class_increment_semantic_segmentation/lifelong_learning_bench/testalgorithms/erfnet/ERFNet/sedna_evaluate.py
	modified:   examples/class_increment_semantic_segmentation/lifelong_learning_bench/testalgorithms/erfnet/ERFNet/sedna_predict.py
	modified:   examples/class_increment_semantic_segmentation/lifelong_learning_bench/testalgorithms/erfnet/ERFNet/utils/args.py
	modified:   examples/class_increment_semantic_segmentation/lifelong_learning_bench/testalgorithms/erfnet/ERFNet/utils/iouEval.py
	modified:   examples/class_increment_semantic_segmentation/lifelong_learning_bench/testalgorithms/erfnet/ERFNet/utils/loss.py

    Signed-off-by: qxygxt <[email protected]>
qxygxt committed Oct 31, 2023
1 parent f7e1f47 commit 2f14077
Showing 11 changed files with 13 additions and 96 deletions. In the diffs below, removed lines are prefixed with "-" and added lines with "+".
examples/class_increment_semantic_segmentation/lifelong_learning_bench/testalgorithms/erfnet/ERFNet/basemodel.py
@@ -50,7 +50,6 @@ def __init__(self, **kwargs):
self.train_args.epochs = kwargs.get("epochs", 2)
self.train_args.eval_interval = kwargs.get("eval_interval", 2)
self.train_args.no_val = kwargs.get("no_val", True)
- # self.train_args.resume = Context.get_parameters("PRETRAINED_MODEL_URL", None)
self.trainer = None

label_save_dir = Context.get_parameters("INFERENCE_RESULT_DIR", "./inference_results")
@@ -78,11 +77,6 @@ def train(self, train_data, valid_data=None, **kwargs):
'optimizer': self.trainer.optimizer.state_dict(),
'best_pred': self.trainer.best_pred,
}, is_best)

- # if not self.trainer.args.no_val and \
- # epoch % self.train_args.eval_interval == (self.train_args.eval_interval - 1) \
- # and self.trainer.val_loader:
- # self.trainer.validation(epoch)

self.trainer.writer.close()
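
For context, the checkpoint dictionary saved in the hunk above follows the standard PyTorch pattern. A minimal sketch under that reading; save_checkpoint, load_checkpoint, model, and optimizer are hypothetical stand-ins for the trainer's attributes, not names from this repository:

    import torch

    def save_checkpoint(model, optimizer, epoch, best_pred, path="checkpoint.pth"):
        # Persist everything needed to resume training, mirroring the dict above.
        torch.save({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'best_pred': best_pred,
        }, path)

    def load_checkpoint(model, optimizer, path="checkpoint.pth"):
        # Restore model and optimizer state; returns the epoch to resume from.
        ckpt = torch.load(path, map_location="cpu")
        model.load_state_dict(ckpt['state_dict'])
        optimizer.load_state_dict(ckpt['optimizer'])
        return ckpt['epoch'], ckpt['best_pred']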

examples/class_increment_semantic_segmentation/lifelong_learning_bench/testalgorithms/erfnet/ERFNet/dataloaders/custom_transforms.py
@@ -25,7 +25,7 @@ def __call__(self, sample):
img -= self.mean
img /= self.std

- # mean and std for original depth images
+ # mean and std: the mean and standard deviation values of the original depth images
mean_depth = 0.12176
std_depth = 0.09752
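
These statistics are presumably applied in the usual (x - mean) / std way. A minimal sketch under that assumption; normalize_depth is a hypothetical helper, not part of this file:

    import numpy as np

    def normalize_depth(depth: np.ndarray,
                        mean_depth: float = 0.12176,
                        std_depth: float = 0.09752) -> np.ndarray:
        # Standardize a depth map with the dataset statistics quoted above;
        # assumes `depth` has already been scaled to floats (e.g. into [0, 1]).
        return (depth.astype(np.float32) - mean_depth) / std_depth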

@@ -69,6 +69,7 @@ def __call__(self, sample):
depth = sample['depth']
mask = sample['label']
width, height = img.size
+ # coordinates of the left, top, right, and bottom boundaries of the cropping region
left = 140
top = 30
right = 2030
@@ -81,10 +82,6 @@
img = img.resize((width,height), Image.BILINEAR)
depth = depth.resize((width,height), Image.BILINEAR)
mask = mask.resize((width,height), Image.NEAREST)
- # img = img.resize((512,1024), Image.BILINEAR)
- # depth = depth.resize((512,1024), Image.BILINEAR)
- # mask = mask.resize((512,1024), Image.NEAREST)

return {'image': img,
'depth': depth,
'label': mask}
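
The hunk above crops a fixed black border and then resizes back to the original resolution. A standalone sketch of the same pattern with PIL; the bottom coordinate is not visible in this hunk, so 900 is a placeholder, and crop_black_area is a hypothetical name:

    from PIL import Image

    def crop_black_area(img: Image.Image,
                        box=(140, 30, 2030, 900)) -> Image.Image:
        # box = (left, top, right, bottom); 900 is a placeholder bottom value.
        width, height = img.size
        cropped = img.crop(box)
        return cropped.resize((width, height), Image.BILINEAR)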
examples/class_increment_semantic_segmentation/lifelong_learning_bench/testalgorithms/erfnet/ERFNet/dataloaders/datasets/citylostfound.py
@@ -83,7 +83,6 @@ def __getitem__(self, index):
def relabel_lostandfound(self, input):
input = tr.Relabel(0, self.ignore_index)(input) # background->255 ignore
input = tr.Relabel(1, 0)(input) # road 1->0
- # input = Relabel(255, 20)(input) # unlabel 20
input = tr.Relabel(2, 19)(input) # obstacle 19
return input

@@ -108,7 +107,7 @@ def transform_tr(self, sample):
tr.CropBlackArea(),
tr.RandomHorizontalFlip(),
tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size, fill=255),
- # tr.RandomGaussianBlur(),
+ # standardize the pixel values to mean (0, 0, 0) and standard deviation (1, 1, 1)
tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
tr.ToTensor()])

@@ -126,7 +125,6 @@ def transform_ts(self, sample):
def transform_ts(self, sample):

composed_transforms = transforms.Compose([
- # tr.CropBlackArea(),
tr.FixedResize(size=self.args.crop_size),
tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
tr.ToTensor()])
@@ -171,7 +169,7 @@ def __getitem__(self, index):
_img = Image.open(img_path).convert('RGB')
_tmp = np.array(Image.open(lbl_path), dtype=np.uint8)
if self.split == 'train':
- if index < 1036: # lostandfound
+ if index < 1036: # threshold for lostandfound
_tmp = self.relabel_lostandfound(_tmp)
else: # cityscapes
pass
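
For reference, tr.Relabel(old, new) rewrites one label id in a mask. A minimal numpy sketch of the remapping relabel_lostandfound performs; relabel is a hypothetical helper shown only for illustration:

    import numpy as np

    def relabel(mask: np.ndarray, old_id: int, new_id: int) -> np.ndarray:
        # Replace every pixel labeled `old_id` with `new_id`.
        out = mask.copy()
        out[out == old_id] = new_id
        return out

    # mask = relabel(mask, 0, 255)  # background -> ignore
    # mask = relabel(mask, 1, 0)    # road
    # mask = relabel(mask, 2, 19)   # obstacle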
@@ -212,7 +210,6 @@ def transform_tr(self, sample):
tr_rgb.CropBlackArea(),
tr_rgb.RandomHorizontalFlip(),
tr_rgb.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size, fill=255),
- # tr.RandomGaussianBlur(),
tr_rgb.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
tr_rgb.ToTensor()])

examples/class_increment_semantic_segmentation/lifelong_learning_bench/testalgorithms/erfnet/ERFNet/dataloaders/datasets/cityscapes.py
@@ -13,8 +13,7 @@ class CityscapesSegmentation(data.Dataset):

def __init__(self, args, root=Path.db_root_dir('cityscapes'), data=None, split="train"):

- # self.root = root
- self.root = "/home/lsq/Dataset/"
+ self.root = root
self.split = split
self.args = args
self.images = {}
@@ -95,7 +94,6 @@ def transform_tr(self, sample):
tr.CropBlackArea(),
tr.RandomHorizontalFlip(),
tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size, fill=255),
- # tr.RandomGaussianBlur(),
tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
tr.ToTensor()])

@@ -113,8 +111,6 @@ def transform_val(self, sample):
def transform_ts(self, sample):

composed_transforms = transforms.Compose([
- #tr.CropBlackArea(),
- #tr.FixedResize(size=self.args.crop_size),
tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
tr.ToTensor()])


examples/class_increment_semantic_segmentation/lifelong_learning_bench/testalgorithms/erfnet/ERFNet/dataloaders/datasets/temp.txt

This file was deleted.

examples/class_increment_semantic_segmentation/lifelong_learning_bench/testalgorithms/erfnet/ERFNet/mypath.py
@@ -2,19 +2,19 @@ class Path(object):
@staticmethod
def db_root_dir(dataset):
if dataset == 'cityscapes':
- return '/home/robo/m0063/project/RFNet-master/Data/cityscapes/' # folder that contains leftImg8bit/
+ return './ianvs/project/RFNet-master/Data/cityscapes/' # folder that contains leftImg8bit/
elif dataset == 'citylostfound':
- return '/home/robo/m0063/project/RFNet-master/Data/cityscapesandlostandfound/' # folder that mixes Cityscapes and Lost and Found
+ return './ianvs/project/RFNet-master/Data/cityscapesandlostandfound/' # folder that mixes Cityscapes and Lost and Found
elif dataset == 'cityrand':
- return '/home/robo/m0063/project/RFNet-master/Data/cityrand/'
+ return './ianvs/project/RFNet-master/Data/cityrand/'
elif dataset == 'target':
- return '/home/robo/m0063/project/RFNet-master/Data/target/'
+ return './ianvs/project/RFNet-master/Data/target/'
elif dataset == 'xrlab':
- return '/home/robo/m0063/project/RFNet-master/Data/xrlab/'
+ return './ianvs/project/RFNet-master/Data/xrlab/'
elif dataset == 'e1':
- return '/home/robo/m0063/project/RFNet-master/Data/e1/'
+ return './ianvs/project/RFNet-master/Data/e1/'
elif dataset == 'mapillary':
- return '/home/robo/m0063/project/RFNet-master/Data/mapillary/'
+ return './ianvs/project/RFNet-master/Data/mapillary/'
else:
print('Dataset {} not available.'.format(dataset))
raise NotImplementedError
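
A short usage sketch for the helper above, assuming the module is importable as mypath; since the roots are now relative, they resolve against the current working directory:

    from mypath import Path

    root = Path.db_root_dir('cityscapes')
    # -> './ianvs/project/RFNet-master/Data/cityscapes/', relative to os.getcwd()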
examples/class_increment_semantic_segmentation/lifelong_learning_bench/testalgorithms/erfnet/ERFNet/sedna_evaluate.py
@@ -1,10 +1,5 @@
import os
os.environ['BACKEND_TYPE'] = 'PYTORCH'
# os.environ["KB_SERVER"] = "http://0.0.0.0:9020"
# os.environ["test_dataset_url"] = "./data_txt/sedna_data.txt"
# os.environ["MODEL_URLS"] = "./cloud_next_kb/index.pkl"
# os.environ["operator"] = "<"
# os.environ["model_threshold"] = "0"

from sedna.core.lifelong_learning import LifelongLearning
from sedna.datasources import IndexDataParse
examples/class_increment_semantic_segmentation/lifelong_learning_bench/testalgorithms/erfnet/ERFNet/sedna_predict.py
@@ -1,9 +1,6 @@
import os

os.environ['BACKEND_TYPE'] = 'PYTORCH'
# os.environ["UNSEEN_SAVE_URL"] = "s3://kubeedge/sedna-robo/unseen_samples/"
# set at yaml
# os.environ["PREDICT_RESULT_DIR"] = "./inference_results"
os.environ["TEST_DATASET_URL"] = "./data_txt/door_test.txt"
os.environ["EDGE_OUTPUT_URL"] = "./edge_kb"
os.environ["ORIGINAL_DATASET_URL"] = "/tmp"
examples/class_increment_semantic_segmentation/lifelong_learning_bench/testalgorithms/erfnet/ERFNet/utils/args.py
@@ -32,7 +32,7 @@ def __init__(self, **kwargs):
self.eval_interval = kwargs.get("eval_interval", 50)
self.no_val = kwargs.get("no_val", True)
self.cuda = True
- self.savedir = '/home/QXY/dataset/save'
+ self.savedir = './dataset/mdil-ss/save'

class ValArgs:
def __init__(self, **kwargs):
examples/class_increment_semantic_segmentation/lifelong_learning_bench/testalgorithms/erfnet/ERFNet/utils/iouEval.py
@@ -17,14 +17,9 @@ def reset(self):
self.tp_obstacle = torch.zeros(1).double()
self.idp_obstacle = torch.zeros(1).double()
self.tp_nonobstacle = torch.zeros(1).double()
- # self.cdi = torch.zeros(1).double()

def addBatch(self, x, y): # x=preds, y=targets
# sizes should be "batch_size x nClasses x H x W"
- # cdi = 0

# print ("X is cuda: ", x.is_cuda)
# print ("Y is cuda: ", y.is_cuda)

if (x.is_cuda or y.is_cuda):
x = x.cuda()
@@ -76,16 +71,10 @@ def addBatch(self, x, y): # x=preds, y=targets
idp_obstacle = (x_onehot[:, 19] - tpmult[:, 19]).sum()
tp_nonobstacle = (-1*y_onehot+1).sum()

- # for i in range(0, x.size(0)):
- # if tpmult[i].sum()/(y_onehot[i].sum() + 1e-15) >= 0.5:
- # cdi += 1


self.cdp_obstacle += cdp_obstacle.double().cpu()
self.tp_obstacle += tp_obstacle.double().cpu()
self.idp_obstacle += idp_obstacle.double().cpu()
self.tp_nonobstacle += tp_nonobstacle.double().cpu()
- # self.cdi += cdi.double().cpu()
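
The tpmult bookkeeping above comes from one-hot encodings of predictions and targets. A simplified sketch of the idea; confusion_counts and num_classes are hypothetical names, and the real addBatch builds its one-hot tensors differently (e.g. via scatter) rather than with F.one_hot:

    import torch
    import torch.nn.functional as F

    def confusion_counts(preds: torch.Tensor, targets: torch.Tensor, num_classes: int = 20):
        # preds/targets: (batch, H, W) tensors of integer class indices.
        x_onehot = F.one_hot(preds.long(), num_classes).permute(0, 3, 1, 2).double()
        y_onehot = F.one_hot(targets.long(), num_classes).permute(0, 3, 1, 2).double()
        tpmult = x_onehot * y_onehot                         # 1 where pred == target == class
        tp = tpmult.sum(dim=(0, 2, 3))                       # true positives per class
        fp = (x_onehot * (1 - y_onehot)).sum(dim=(0, 2, 3))  # false positives per class
        fn = ((1 - x_onehot) * y_onehot).sum(dim=(0, 2, 3))  # false negatives per class
        return tp, fp, fn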



@@ -94,7 +83,6 @@ def getIoU(self):
den = self.tp + self.fp + self.fn + 1e-15
iou = num / den
iou_not_zero = list(filter(lambda x: x != 0, iou))
- # print(len(iou_not_zero))
iou_mean = sum(iou_not_zero) / len(iou_not_zero)
tfp = self.tp + self.fp + 1e-15
acc = num / tfp
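
The surrounding getIoU computes per-class IoU = TP / (TP + FP + FN) and averages over the classes that actually appear. A minimal sketch of that calculation; mean_iou is a hypothetical name:

    import torch

    def mean_iou(tp: torch.Tensor, fp: torch.Tensor, fn: torch.Tensor) -> torch.Tensor:
        iou = tp / (tp + fp + fn + 1e-15)       # per-class IoU
        present = iou[iou != 0]                 # drop classes absent from the data
        return present.sum() / present.numel()  # assumes at least one class is present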
examples/class_increment_semantic_segmentation/lifelong_learning_bench/testalgorithms/erfnet/ERFNet/utils/loss.py
@@ -48,12 +48,3 @@ def FocalLoss(self, logit, target, gamma=2, alpha=0.5):

if __name__ == "__main__":
loss = SegmentationLosses(cuda=True)
- a = torch.rand(1, 3, 7, 7).cuda()
- b = torch.rand(1, 7, 7).cuda()
- print(loss.CrossEntropyLoss(a, b).item())
- print(loss.FocalLoss(a, b, gamma=0, alpha=None).item())
- print(loss.FocalLoss(a, b, gamma=2, alpha=0.5).item())
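
The deleted lines were a CUDA-only smoke test. An equivalent CPU sketch with the same shapes, assuming SegmentationLosses also accepts cuda=False (this diff does not show the constructor):

    import torch

    loss = SegmentationLosses(cuda=False)
    logit = torch.rand(1, 3, 7, 7)   # batch x classes x H x W
    target = torch.rand(1, 7, 7)     # batch x H x W
    print(loss.CrossEntropyLoss(logit, target).item())
    print(loss.FocalLoss(logit, target, gamma=2, alpha=0.5).item())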



