diff --git a/setup.py b/setup.py
index befb663b98..6cc65702a7 100644
--- a/setup.py
+++ b/setup.py
@@ -160,7 +160,7 @@ def package_files(prefix: str, directory: str, extension: str):
 ]
 
 extra_deps['wandb'] = [
-    'wandb>=0.13.2,<0.18',
+    'wandb>=0.13.2,<0.19',
 ]
 
 extra_deps['comet_ml'] = [
diff --git a/tests/loggers/test_wandb_logger.py b/tests/loggers/test_wandb_logger.py
index e190e39663..b0462fc842 100644
--- a/tests/loggers/test_wandb_logger.py
+++ b/tests/loggers/test_wandb_logger.py
@@ -269,10 +269,10 @@ def test_wandb_log_metrics(test_wandb_logger):
     eval_metrics_cross_entropy_count = all_run_text.count('metrics/eval/CrossEntropy')
     train_loss_count = all_run_text.count('loss/train/total')
 
-    expected_number_train_loss_count = (dataset_size / batch_size) + 1  # wandb includes it in the file one extra time
+    expected_number_train_loss_count = (dataset_size / batch_size) * 2  # wandb includes it twice per step
     expected_number_train_metrics_count = (
         dataset_size / batch_size
-    ) + 2  # wandb includes it in the file two extra times
+    ) * 2 + 2  # wandb includes it twice per step plus two extra times
     expected_number_eval_metrics_count = 2  # wandb includes it in the file twice
     assert train_metrics_accuracy_count == expected_number_train_metrics_count
     assert train_loss_count == expected_number_train_loss_count
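
The test change follows from the diff's own comments: under the newly allowed wandb versions, each logged train scalar appears twice per training step in the run text, so the old additive offsets ("+ 1", "+ 2") become a factor of two. A minimal sketch of that arithmetic, using hypothetical dataset_size and batch_size values (the real ones come from the test's fixtures):

    # Hypothetical fixture values chosen for illustration only; the actual
    # test derives these from its dataset and dataloader fixtures.
    dataset_size = 80
    batch_size = 16
    num_steps = dataset_size / batch_size  # 5.0 training steps

    # Per the diff's comments, each train scalar now shows up twice per
    # step in the run text, so the counts double.
    expected_number_train_loss_count = num_steps * 2         # 10.0
    expected_number_train_metrics_count = num_steps * 2 + 2  # 12.0, two extra occurrences
    expected_number_eval_metrics_count = 2                   # unchanged

    assert expected_number_train_loss_count == 10.0
    assert expected_number_train_metrics_count == 12.0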