
Commit

update comments
FabianIsensee committed Jan 29, 2024
1 parent f4436e3 commit fa552d3
Showing 6 changed files with 6 additions and 9 deletions.
2 changes: 1 addition & 1 deletion nnunetv2/evaluation/evaluate_predictions.py
@@ -33,7 +33,7 @@ def key_to_label_or_region(key: str):

 def save_summary_json(results: dict, output_file: str):
     """
-    stupid json does not support tuples as keys (why does it have to be so shitty) so we need to convert that shit
+    json does not support tuples as keys (why does it have to be so shitty) so we need to convert that shit
     ourselves
     """
     results_converted = deepcopy(results)
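For readers of this hunk: JSON object keys must be strings, which is why save_summary_json has to convert tuple (region) keys before dumping. A minimal sketch of that kind of conversion, using a hypothetical helper rather than nnU-Net's actual code:

import json

def stringify_tuple_keys(d: dict) -> dict:
    # JSON keys must be strings; turn tuple keys like (1, 2) into "(1, 2)"
    return {str(k) if isinstance(k, tuple) else k: v for k, v in d.items()}

metrics = {(1, 2): 0.87, 3: 0.91}  # a region key and a plain label key
print(json.dumps(stringify_tuple_keys(metrics)))  # {"(1, 2)": 0.87, "3": 0.91}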
@@ -464,7 +464,7 @@ def plan_experiment(self):
         shutil.copy(join(self.raw_dataset_folder, 'dataset.json'),
                     join(nnUNet_preprocessed, self.dataset_name, 'dataset.json'))

-        # json is stupid and I hate it... "Object of type int64 is not JSON serializable" -> my ass
+        # json is ###. I hate it... "Object of type int64 is not JSON serializable"
         plans = {
             'dataset_name': self.dataset_name,
             'plans_name': self.plans_identifier,
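The error quoted in the comment appears because numpy scalars (e.g. np.int64 from shape computations) end up in the plans dict. A common workaround, sketched here with a hypothetical converter rather than the planner's actual fix, is a default= hook for json.dumps:

import json
import numpy as np

def numpy_to_native(obj):
    # called by json.dumps for anything it cannot serialize itself
    if isinstance(obj, (np.integer, np.floating, np.bool_)):
        return obj.item()  # numpy scalar -> native int/float/bool
    raise TypeError(f'Object of type {type(obj).__name__} is not JSON serializable')

print(json.dumps({'num_classes': np.int64(3)}, default=numpy_to_native))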
1 change: 0 additions & 1 deletion nnunetv2/inference/predict_from_raw_data.py
@@ -57,7 +57,6 @@ def __init__(self,
         self.use_mirroring = use_mirroring
         if device.type == 'cuda':
             # device = torch.device(type='cuda', index=0) # set the desired GPU with CUDA_VISIBLE_DEVICES!
-            # why would I ever want to do that. Stupid dobby. This kills DDP inference...
             pass
         if device.type != 'cuda':
             print(f'perform_everything_on_device=True is only supported for cuda devices! Setting this to False')
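The commented-out line documents a deliberate design choice: the GPU is chosen via CUDA_VISIBLE_DEVICES instead of a hard-coded device index, because pinning index 0 in code would put every DDP rank on the same GPU. A sketch of the intended single-GPU usage (assuming physical GPU 1 should be used):

import os
os.environ['CUDA_VISIBLE_DEVICES'] = '1'  # must be set before CUDA is initialized

import torch
device = torch.device('cuda')  # GPU 1 is now the only visible device, i.e. cuda:0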
2 changes: 1 addition & 1 deletion nnunetv2/preprocessing/resampling/default_resampling.py
@@ -83,7 +83,7 @@ def resample_data_or_seg_to_shape(data: Union[torch.Tensor, np.ndarray],
                                    force_separate_z: Union[bool, None] = False,
                                    separate_z_anisotropy_threshold: float = ANISO_THRESHOLD):
     """
-    needed for segmentation export. Stupid, I know. Maybe we can fix that with Leos new resampling functions
+    needed for segmentation export. Stupid, I know
     """
     if isinstance(data, torch.Tensor):
         data = data.cpu().numpy()
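The isinstance check at the top of this function exists because the resampling code is numpy-based while data may arrive as torch tensors, and .numpy() only works on CPU tensors. The same guard in isolation, as a self-contained sketch:

import numpy as np
import torch

def to_numpy(data):
    # GPU tensors must be moved to the CPU before conversion to numpy
    if isinstance(data, torch.Tensor):
        data = data.detach().cpu().numpy()
    return np.asarray(data)

print(type(to_numpy(torch.zeros(2, 3))))  # <class 'numpy.ndarray'>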
@@ -34,9 +34,6 @@

 class nnUNetTrainerDA5(nnUNetTrainer):
     def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self):
-        """
-        This function is stupid and certainly one of the weakest spots of this implementation. Not entirely sure how we can fix it.
-        """
         patch_size = self.configuration_manager.patch_size
         dim = len(patch_size)
         # todo rotation should be defined dynamically based on patch size (more isotropic patch sizes = more rotation)
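The todo suggests deriving the rotation range from how isotropic the patch is instead of hard-coding it. One way that could look, purely as a hypothetical sketch and not what the trainer currently does:

import numpy as np

def rotation_range_from_patch_size(patch_size):
    # near-isotropic patches tolerate full rotation; strongly anisotropic
    # patches (e.g. thick-slice data) should be rotated much less
    anisotropy = max(patch_size) / min(patch_size)
    max_deg = 180. if anisotropy < 1.5 else 15.
    return (-max_deg / 360 * 2. * np.pi, max_deg / 360 * 2. * np.pi)

print(rotation_range_from_patch_size([128, 128, 128]))  # isotropic -> wide range
print(rotation_range_from_patch_size([24, 192, 192]))   # anisotropic -> narrow range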
5 changes: 3 additions & 2 deletions nnunetv2/utilities/json_export.py
@@ -5,7 +5,8 @@


 def recursive_fix_for_json_export(my_dict: dict):
-    # json is stupid. 'cannot serialize object of type bool_/int64/float64'. Come on bro.
+    # json is ... a very nice thing to have
+    # 'cannot serialize object of type bool_/int64/float64'. Apart from that of course...
     keys = list(my_dict.keys())  # cannot iterate over keys() if we change keys....
     for k in keys:
         if isinstance(k, (np.int64, np.int32, np.int8, np.uint8)):
@@ -37,7 +38,7 @@ def recursive_fix_for_json_export(my_dict: dict):


 def fix_types_iterable(iterable, output_type):
-    # this sh!t is hacky as hell and will break if you use it for anything outside nnunet. Keep you hands off of this.
+    # this sh!t is hacky as hell and will break if you use it for anything outside nnunet. Keep your hands off of this.
     out = []
     for i in iterable:
         if type(i) in (np.int64, np.int32, np.int8, np.uint8):
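For context on how these helpers are meant to be used: the fix is applied in place to a results dict before it is written out. A usage sketch, assuming nnunetv2 is installed:

import json
import numpy as np
from nnunetv2.utilities.json_export import recursive_fix_for_json_export

results = {np.int64(1): {'dice': np.float64(0.92), 'present': np.bool_(True)}}
recursive_fix_for_json_export(results)  # mutates in place: numpy types -> native
with open('summary.json', 'w') as f:
    json.dump(results, f, indent=4)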
