diff --git a/river_dl/evaluate.py b/river_dl/evaluate.py
index 76a858d..06195d4 100755
--- a/river_dl/evaluate.py
+++ b/river_dl/evaluate.py
@@ -201,7 +201,7 @@ def partition_metrics(
     outfile=None,
     val_sites=None,
     test_sites=None,
-
+    train_sites=None,
 ):
     """
     calculate metrics for a certain group (or no group at all) for a given
@@ -222,8 +222,9 @@ def partition_metrics(
     names and dict values are the id values. These are added as columns to the
     metrics information
     :param outfile: [str] file where the metrics should be written
-    :param val_sites: [list] sites to exclude from training metrics
+    :param val_sites: [list] sites to exclude from training and test metrics
     :param test_sites: [list] sites to exclude from validation and training metrics
+    :param train_sites: [list] sites to exclude from test metrics
     :return: [pd dataframe] the condensed metrics
     """
     var_data = fmt_preds_obs(preds, obs_file, spatial_idx_name,
@@ -240,6 +241,10 @@ def partition_metrics(
         # mask out test sites from val partition
         if test_sites and partition=='val':
             data = data[~data[spatial_idx_name].isin(test_sites)]
+        if train_sites and partition=='tst':
+            data = data[~data[spatial_idx_name].isin(train_sites)]
+        if val_sites and partition=='tst':
+            data = data[~data[spatial_idx_name].isin(val_sites)]
 
         if not group:
             metrics = calc_metrics(data)
@@ -286,6 +291,7 @@ def combined_metrics(
     pred_tst=None,
     val_sites=None,
     test_sites=None,
+    train_sites=None,
     spatial_idx_name="seg_id_nat",
     time_idx_name="date",
     group=None,
@@ -349,7 +355,8 @@ def combined_metrics(
             id_dict=id_dict,
             group=group,
             val_sites = val_sites,
-            test_sites = test_sites)
+            test_sites = test_sites,
+            train_sites=train_sites)
         df_all.extend([metrics])
 
     df_all = pd.concat(df_all, axis=0)
diff --git a/river_dl/torch_utils.py b/river_dl/torch_utils.py
index 1751f40..05a7d06 100644
--- a/river_dl/torch_utils.py
+++ b/river_dl/torch_utils.py
@@ -238,19 +238,20 @@ def predict_torch(x_data, model, batch_size):
     @param device: [str] cuda or cpu
     @return: [tensor] predicted values
     """
-    device = next(model.parameters()).device
+    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+    model.to(device)
     data = []
     for i in range(len(x_data)):
         data.append(torch.from_numpy(x_data[i]).float())
     dataloader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=False,
                                              pin_memory=True)
-    model.to(device)
     model.eval()
     predicted = []
     for iter, x in enumerate(dataloader):
         trainx = x.to(device)
         with torch.no_grad():
-            output = model(trainx.to(device)).cpu()
+            output = model(trainx).detach().cpu()
         predicted.append(output)
     predicted = torch.cat(predicted, dim=0)
     return predicted