Commit
Merge pull request #832 from VukW/black_include_all
Added black trailing comma style fix
Geeks-Sid authored Mar 26, 2024
2 parents 3e0480a + 03b7ce9 commit c52de72
Showing 53 changed files with 218 additions and 491 deletions.
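The collapsed call sites in the diffs below are consistent with black's "magic trailing comma" rule: a trailing comma after the last argument pins a call to one argument per line, while removing it lets black join the call onto a single line whenever it fits. A minimal illustration of the rule, using hypothetical paths rather than code from this repository:

import os
import shutil

os.makedirs("workspace_src", exist_ok=True)  # hypothetical source tree

# With a magic trailing comma after the last argument, black keeps
# the call exploded, one argument per line:
shutil.copytree(
    "workspace_src",
    "workspace_dst",
    dirs_exist_ok=True,
)

# Without the trailing comma, black collapses the call onto a single
# line whenever it fits the line length (88 columns by default):
shutil.copytree("workspace_src", "workspace_dst", dirs_exist_ok=True)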
9 changes: 8 additions & 1 deletion .github/workflows/black.yml
@@ -10,7 +10,14 @@ jobs:
         python-version: [3.9]
     steps:
       - uses: actions/checkout@v2
+
+      - name: Extract black version from setup.py
+        run: |
+          echo "BLACK_VERSION=$(python -c 'from setup import black_version; print(black_version)')" >> $GITHUB_ENV
+      - uses: psf/black@stable
+        with:
+          version: ${{ env.BLACK_VERSION }}
+
       - name: Set up Python ${{ matrix.python-version }}
         uses: actions/setup-python@v4
@@ -20,7 +27,7 @@ jobs:
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
-          python -m pip install black==23.11.0
+          python -m pip install black==${{ env.BLACK_VERSION }}
       - name: Run tests
         run: |
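The new workflow step assumes setup.py exposes the pinned black version as a module-level variable that can be imported without side effects. A minimal sketch of that assumption (the real GANDLF setup.py carries the full package metadata and may differ):

# setup.py (sketch, not the actual file)
from setuptools import setup

black_version = "23.11.0"  # hypothetical pin; the CI step reads this value

# guarding setup() keeps `from setup import black_version` side-effect free
if __name__ == "__main__":
    setup(
        name="GANDLF",
        install_requires=[f"black=={black_version}"],
    )

With this layout, the psf/black action and the pip install step below resolve the same version from a single source of truth.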
4 changes: 1 addition & 3 deletions GANDLF/cli/deploy.py
@@ -113,9 +113,7 @@ def deploy_docker_mlcube(
     os.makedirs(output_workspace_folder, exist_ok=True)
     if os.path.exists(mlcube_workspace_folder):
         shutil.copytree(
-            mlcube_workspace_folder,
-            output_workspace_folder,
-            dirs_exist_ok=True,
+            mlcube_workspace_folder, output_workspace_folder, dirs_exist_ok=True
         )

     if config is not None:
62 changes: 26 additions & 36 deletions GANDLF/cli/generate_metrics.py
@@ -112,13 +112,10 @@ def generate_metrics_dict(
             parameters["model"]["class_list"] = [1]
             parameters["model"]["num_classes"] = 1
             overall_stats_dict[current_subject_id][str(class_index)]["dice"] = dice(
-                current_prediction,
-                current_target,
+                current_prediction, current_target
             ).item()
             nsd, hd100, hd95 = _calculator_generic_all_surface_distances(
-                current_prediction,
-                current_target,
-                parameters,
+                current_prediction, current_target, parameters
             )
             overall_stats_dict[current_subject_id][str(class_index)][
                 "nsd"
@@ -130,13 +127,8 @@ def generate_metrics_dict(
                 "hd95"
             ] = hd95.item()

-            (
-                s,
-                p,
-            ) = _calculator_sensitivity_specificity(
-                current_prediction,
-                current_target,
-                parameters,
+            (s, p) = _calculator_sensitivity_specificity(
+                current_prediction, current_target, parameters
             )
             overall_stats_dict[current_subject_id][str(class_index)][
                 "sensitivity"
@@ -147,9 +139,7 @@ def generate_metrics_dict(
             overall_stats_dict[current_subject_id][
                 "jaccard_" + str(class_index)
             ] = _calculator_jaccard(
-                current_prediction,
-                current_target,
-                parameters,
+                current_prediction, current_target, parameters
             ).item()
             current_target_image = sitk.GetImageFromArray(
                 current_target[0, 0, ...].long()
@@ -279,9 +269,9 @@ def __percentile_clip(
                 strictlyPositive=True,
             )

-            overall_stats_dict[current_subject_id]["ssim"] = (
-                structural_similarity_index(output_infill, gt_image_infill, mask).item()
-            )
+            overall_stats_dict[current_subject_id][
+                "ssim"
+            ] = structural_similarity_index(output_infill, gt_image_infill, mask).item()

             # ncc metrics
             compute_ncc = parameters.get("compute_ncc", True)
@@ -322,30 +312,30 @@ def __percentile_clip(
             ).item()

             # same as above but with epsilon for robustness
-            overall_stats_dict[current_subject_id]["psnr_eps"] = (
-                peak_signal_noise_ratio(
-                    output_infill, gt_image_infill, epsilon=sys.float_info.epsilon
-                ).item()
-            )
+            overall_stats_dict[current_subject_id][
+                "psnr_eps"
+            ] = peak_signal_noise_ratio(
+                output_infill, gt_image_infill, epsilon=sys.float_info.epsilon
+            ).item()

             # only use fix data range to [0;1] if the data was normalized before
             if normalize:
                 # torchmetrics PSNR but with fixed data range of 0 to 1
-                overall_stats_dict[current_subject_id]["psnr_01"] = (
-                    peak_signal_noise_ratio(
-                        output_infill, gt_image_infill, data_range=(0, 1)
-                    ).item()
-                )
+                overall_stats_dict[current_subject_id][
+                    "psnr_01"
+                ] = peak_signal_noise_ratio(
+                    output_infill, gt_image_infill, data_range=(0, 1)
+                ).item()

                 # same as above but with epsilon for robustness
-                overall_stats_dict[current_subject_id]["psnr_01_eps"] = (
-                    peak_signal_noise_ratio(
-                        output_infill,
-                        gt_image_infill,
-                        data_range=(0, 1),
-                        epsilon=sys.float_info.epsilon,
-                    ).item()
-                )
+                overall_stats_dict[current_subject_id][
+                    "psnr_01_eps"
+                ] = peak_signal_noise_ratio(
+                    output_infill,
+                    gt_image_infill,
+                    data_range=(0, 1),
+                    epsilon=sys.float_info.epsilon,
+                ).item()

     pprint(overall_stats_dict)
     if outputfile is not None:
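The psnr_eps variants in this diff pass sys.float_info.epsilon so the metric stays finite when prediction and ground truth are identical and the mean squared error is exactly zero. A rough standalone sketch of the idea, assuming the standard PSNR formula (GANDLF's own peak_signal_noise_ratio may differ in detail):

import sys
import torch

def psnr(pred: torch.Tensor, target: torch.Tensor,
         data_range: float = 1.0, epsilon: float = 0.0) -> torch.Tensor:
    # PSNR = 10 * log10(data_range^2 / MSE); the epsilon keeps the
    # ratio finite when pred == target and MSE collapses to zero
    mse = torch.mean((pred - target) ** 2)
    return 10.0 * torch.log10(data_range**2 / (mse + epsilon))

x = torch.rand(4, 4)
print(psnr(x, x))                                  # inf: zero MSE
print(psnr(x, x, epsilon=sys.float_info.epsilon))  # large but finite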
4 changes: 1 addition & 3 deletions GANDLF/cli/patch_extraction.py
@@ -15,9 +15,7 @@
     patch_artifact_check,
     # pen_marking_check,
 )
-from GANDLF.utils import (
-    parseTrainingCSV,
-)
+from GANDLF.utils import parseTrainingCSV


 def parse_gandlf_csv(fpath):
4 changes: 1 addition & 3 deletions GANDLF/cli/preprocess_and_save.py
@@ -66,9 +66,7 @@ def preprocess_and_save(
     )

     dataloader_for_processing = DataLoader(
-        data_for_processing,
-        batch_size=1,
-        pin_memory=False,
+        data_for_processing, batch_size=1, pin_memory=False
     )

     # initialize a new dict for the preprocessed data
14 changes: 3 additions & 11 deletions GANDLF/compute/forward_pass.py
@@ -345,11 +345,7 @@ def validate_network(

             # if jpg detected, convert to 8-bit arrays
             ext = get_filename_extension_sanitized(subject["1"]["path"][0])
-            if ext in [
-                ".jpg",
-                ".jpeg",
-                ".png",
-            ]:
+            if ext in [".jpg", ".jpeg", ".png"]:
                 pred_mask = pred_mask.astype(np.uint8)

             ## special case for 2D
@@ -369,8 +365,7 @@ def validate_network(
                 # Create the subject directory if it doesn't exist in the
                 # current_output_dir directory
                 os.makedirs(
-                    os.path.join(current_output_dir, "testing"),
-                    exist_ok=True,
+                    os.path.join(current_output_dir, "testing"), exist_ok=True
                 )
                 os.makedirs(
                     os.path.join(
@@ -386,10 +381,7 @@ def validate_network(
                     subject["subject_id"][0] + "_seg" + ext,
                 )

-                sitk.WriteImage(
-                    result_image,
-                    path_to_save,
-                )
+                sitk.WriteImage(result_image, path_to_save)
             else:
                 # final regression output
                 output_prediction = output_prediction / len(patch_loader)
5 changes: 1 addition & 4 deletions GANDLF/compute/generic.py
@@ -6,10 +6,7 @@
 from GANDLF.models import get_model
 from GANDLF.schedulers import get_scheduler
 from GANDLF.optimizers import get_optimizer
-from GANDLF.data import (
-    get_train_loader,
-    get_validation_loader,
-)
+from GANDLF.data import get_train_loader, get_validation_loader
 from GANDLF.utils import (
     populate_header_in_parameters,
     parseTrainingCSV,
34 changes: 8 additions & 26 deletions GANDLF/compute/inference_loop.py
@@ -62,14 +62,7 @@ def inference_loop(
         or parameters["model"]["type"].lower() == "openvino"
     ), f"The model type is not recognized: {parameters['model']['type']}"

-    (
-        model,
-        _,
-        _,
-        _,
-        _,
-        parameters,
-    ) = create_pytorch_objects(parameters, device=device)
+    (model, _, _, _, _, parameters) = create_pytorch_objects(parameters, device=device)

     # Loading the weights into the model
     main_dict = None
@@ -103,12 +96,10 @@ def inference_loop(
     # Loading the executable OpenVINO model
     if os.path.isdir(modelDir):
         xml_to_check = os.path.join(
-            modelDir,
-            str(parameters["model"]["architecture"]) + "_best.xml",
+            modelDir, str(parameters["model"]["architecture"]) + "_best.xml"
         )
         bin_to_check = os.path.join(
-            modelDir,
-            str(parameters["model"]["architecture"]) + "_best.bin",
+            modelDir, str(parameters["model"]["architecture"]) + "_best.bin"
         )
         if not os.path.isfile(xml_to_check):
             raise ValueError(
@@ -312,28 +303,20 @@ def inference_loop(
                 )

             if parameters["problem_type"] != "segmentation":
-                output_file = os.path.join(
-                    subject_dest_dir,
-                    "predictions.csv",
-                )
+                output_file = os.path.join(subject_dest_dir, "predictions.csv")
                 with open(output_file, "w") as f:
                     f.write(output_to_write)

             heatmaps = {}
             if probs_map is not None:
                 try:
                     for n in range(parameters["model"]["num_classes"]):
-                        heatmap_gray = np.array(
-                            probs_map[n, ...] * 255,
-                            dtype=np.uint8,
-                        )
+                        heatmap_gray = np.array(probs_map[n, ...] * 255, dtype=np.uint8)
                         heatmaps[str(n) + "_jet"] = cv2.applyColorMap(
-                            heatmap_gray,
-                            cv2.COLORMAP_JET,
+                            heatmap_gray, cv2.COLORMAP_JET
                         )
                         heatmaps[str(n) + "_turbo"] = cv2.applyColorMap(
-                            heatmap_gray,
-                            cv2.COLORMAP_TURBO,
+                            heatmap_gray, cv2.COLORMAP_TURBO
                         )
                         heatmaps[str(n) + "_agni"] = applyCustomColorMap(heatmap_gray)

@@ -367,8 +350,7 @@ def inference_loop(
                         )

                         file_to_write = os.path.join(
-                            subject_dest_dir,
-                            "probability_map_blended_" + key + ".png",
+                            subject_dest_dir, "probability_map_blended_" + key + ".png"
                         )
                         cv2.imwrite(file_to_write, blended_image)
                     except Exception as ex:
17 changes: 4 additions & 13 deletions GANDLF/compute/training_loop.py
@@ -100,18 +100,14 @@ def train_network(
             label = torch.cat([subject[key] for key in params["value_keys"]], dim=0)
             # min is needed because for certain cases, batch size becomes smaller than the total remaining labels
             label = label.reshape(
-                min(params["batch_size"], len(label)),
-                len(params["value_keys"]),
+                min(params["batch_size"], len(label)), len(params["value_keys"])
             )
         else:
             label = subject["label"][torchio.DATA]
         label = label.to(params["device"])

         if params["save_training"]:
-            write_training_patches(
-                subject,
-                params,
-            )
+            write_training_patches(subject, params)

         # ensure spacing is always present in params and is always subject-specific
         if "spacing" in subject:
@@ -190,10 +186,7 @@ def train_network(
                     ).tolist()
                 else:
                     to_print = total_epoch_train_metric[metric] / (batch_idx + 1)
-                print(
-                    "Half-Epoch Average train " + metric + " : ",
-                    to_print,
-                )
+                print("Half-Epoch Average train " + metric + " : ", to_print)

     average_epoch_train_loss = total_epoch_train_loss / len(train_dataloader)
     print(" Epoch Final train loss : ", average_epoch_train_loss)
@@ -343,9 +336,7 @@ def training_loop(
     # this is just used to generate the headers for the overall stats
     temp_tensor = torch.randint(0, params["model"]["num_classes"], (5,))
     overall_metrics = overall_stats(
-        temp_tensor.to(dtype=torch.int32),
-        temp_tensor.to(dtype=torch.int32),
-        params,
+        temp_tensor.to(dtype=torch.int32), temp_tensor.to(dtype=torch.int32), params
     )

     metrics_log = params["metrics"].copy()
11 changes: 2 additions & 9 deletions GANDLF/config_manager.py
@@ -445,11 +445,7 @@ def _parseConfig(
     if len(params["data_preprocessing"]) > 0:
         thresholdOrClip = False
         # this can be extended, as required
-        thresholdOrClipDict = [
-            "threshold",
-            "clip",
-            "clamp",
-        ]
+        thresholdOrClipDict = ["threshold", "clip", "clamp"]

         resize_requested = False
         temp_dict = deepcopy(params["data_preprocessing"])
@@ -708,10 +704,7 @@ def _parseConfig(
     params["optimizer"] = temp_dict

     # initialize defaults for inference mechanism
-    inference_mechanism = {
-        "grid_aggregator_overlap": "crop",
-        "patch_overlap": 0,
-    }
+    inference_mechanism = {"grid_aggregator_overlap": "crop", "patch_overlap": 0}
     initialize_inference_mechanism = False
     if not ("inference_mechanism" in params):
         initialize_inference_mechanism = True
5 changes: 1 addition & 4 deletions GANDLF/data/augmentation/__init__.py
@@ -12,10 +12,7 @@
     flip,
     anisotropy,
 )
-from .rotations import (
-    rotate_90,
-    rotate_180,
-)
+from .rotations import rotate_90, rotate_180
 from .rgb_augs import colorjitter_transform
 from .hed_augs import hed_transform
4 changes: 1 addition & 3 deletions GANDLF/data/augmentation/noise_enhanced.py
@@ -55,9 +55,7 @@ def apply_transform(self, subject: Subject) -> Subject:
         return transformed

     def get_params(
-        self,
-        mean_range: Tuple[float, float],
-        std_range: Tuple[float, float],
+        self, mean_range: Tuple[float, float], std_range: Tuple[float, float]
     ) -> Tuple[float, float]:
         mean = self.sample_uniform(*mean_range)
         std = self.sample_uniform(*std_range)
5 changes: 1 addition & 4 deletions GANDLF/data/augmentation/rgb_augs.py
@@ -77,10 +77,7 @@ def apply_transform(self, subject: Subject) -> Subject:
         else:
             hue = self.hue_range
         transform = ColorJitter(
-            brightness=brightness,
-            contrast=contrast,
-            saturation=saturation,
-            hue=hue,
+            brightness=brightness, contrast=contrast, saturation=saturation, hue=hue
         )
         for _, image in self.get_images_dict(subject).items():
             # proceed with processing only if the image is RGB
3 changes: 1 addition & 2 deletions GANDLF/data/augmentation/wrap_torchio.py
@@ -24,8 +24,7 @@
 # define individual functions/lambdas for augmentations to handle properties
 def mri_artifact(parameters):
     return OneOf(
-        {RandomGhosting(): 0.5, RandomSpike(): 0.5},
-        p=parameters["probability"],
+        {RandomGhosting(): 0.5, RandomSpike(): 0.5}, p=parameters["probability"]
     )
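For context, mri_artifact wraps torchio's OneOf, which draws one transform from a weighted dict and applies the chosen one with overall probability p. A standalone sketch with a hardcoded probability in place of the parameters dict:

import torchio as tio

# apply either ghosting or spiking (equal weight) half the time,
# and leave the subject untouched otherwise
mri_artifact = tio.OneOf(
    {tio.RandomGhosting(): 0.5, tio.RandomSpike(): 0.5}, p=0.5
)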
[Diffs for the remaining 39 changed files are not shown.]

0 comments on commit c52de72
