Fix nightly test errors (#2045)
* Revert test tolerances
* Fix notebook parameter parsing
* Add notebook utils tests to test groups
* Fix notebooks
* Fix notebook unit tests
* Update evaluation metrics name map. Handle None for exp_var
* Fix smoke tests
* Cleanup
* Fix functional test errors
* Make notebook parameter update function private
* Fix benchmark notebook bug
* Fix remaining bugs
---------

Signed-off-by: Jun Ki Min <[email protected]>
loomlike authored Dec 21, 2023
1 parent b57cec2 commit 82ee6d3
Showing 29 changed files with 176 additions and 249 deletions.
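
The change repeated across most of these notebooks is the evaluation-metric rename: map_at_k becomes map in recommenders.evaluation.python_evaluation. Below is a minimal sketch of the updated call on toy dataframes, using the library's default column names; the exp_var guard reflects the "Handle None for exp_var" bullet above and is an assumption about when that can happen, not a documented contract.

import pandas as pd
from recommenders.evaluation.python_evaluation import map, ndcg_at_k, exp_var

TOP_K = 2

# Toy ground truth and model scores (hypothetical data, default column names).
test = pd.DataFrame({"userID": [1, 1, 2], "itemID": [10, 11, 10], "rating": [5.0, 4.0, 3.0]})
predictions = pd.DataFrame({"userID": [1, 1, 2, 2], "itemID": [10, 11, 10, 12], "prediction": [0.9, 0.7, 0.8, 0.2]})

# `map` is recommenders' mean-average-precision metric; note it shadows Python's builtin map().
eval_map = map(test, predictions, col_prediction="prediction", k=TOP_K)
eval_ndcg = ndcg_at_k(test, predictions, col_prediction="prediction", k=TOP_K)

# Per the commit notes, exp_var may come back as None, so guard before formatting.
ev = exp_var(test, predictions, col_prediction="prediction")
print(f"MAP@{TOP_K}={eval_map:.4f}  nDCG@{TOP_K}={eval_ndcg:.4f}  exp_var={'n/a' if ev is None else round(ev, 4)}")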
8 changes: 4 additions & 4 deletions examples/00_quick_start/fastai_movielens.ipynb
@@ -59,7 +59,7 @@
"from recommenders.datasets import movielens\n",
"from recommenders.datasets.python_splitters import python_stratified_split\n",
"from recommenders.models.fastai.fastai_utils import cartesian_product, score\n",
"from recommenders.evaluation.python_evaluation import map_at_k, ndcg_at_k, precision_at_k, recall_at_k\n",
"from recommenders.evaluation.python_evaluation import map, ndcg_at_k, precision_at_k, recall_at_k\n",
"from recommenders.evaluation.python_evaluation import rmse, mae, rsquared, exp_var\n",
"from recommenders.utils.notebook_utils import store_metadata\n",
"\n",
@@ -599,9 +599,9 @@
"metadata": {},
"outputs": [],
"source": [
"eval_map = map_at_k(test_df, top_k_scores, col_user=USER, col_item=ITEM, \n",
" col_rating=RATING, col_prediction=PREDICTION, \n",
" relevancy_method=\"top_k\", k=TOP_K)"
"eval_map = map(test_df, top_k_scores, col_user=USER, col_item=ITEM, \n",
" col_rating=RATING, col_prediction=PREDICTION, \n",
" relevancy_method=\"top_k\", k=TOP_K)"
]
},
{
30 changes: 1 addition & 29 deletions examples/00_quick_start/naml_MIND.ipynb
@@ -246,34 +246,6 @@
"model = NAMLModel(hparams, iterator, seed=seed)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"18693it [01:18, 239.62it/s]\n",
"7507it [00:30, 249.74it/s]\n",
"7538it [00:01, 6423.03it/s]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'group_auc': 0.4807, 'mean_mrr': 0.2104, 'ndcg@5': 0.2141, 'ndcg@10': 0.2766}\n"
]
}
],
"source": [
"print(model.run_eval(valid_news_file, valid_behaviors_file))"
]
},
{
"cell_type": "code",
"execution_count": 8,
@@ -398,7 +370,7 @@
],
"source": [
"%%time\n",
"model.fit(train_news_file, train_behaviors_file,valid_news_file, valid_behaviors_file)"
"model.fit(train_news_file, train_behaviors_file, valid_news_file, valid_behaviors_file)"
]
},
{
8 changes: 4 additions & 4 deletions examples/00_quick_start/ncf_movielens.ipynb
@@ -56,10 +56,10 @@
"from recommenders.models.ncf.ncf_singlenode import NCF\n",
"from recommenders.models.ncf.dataset import Dataset as NCFDataset\n",
"from recommenders.datasets import movielens\n",
"from recommenders.utils.notebook_utils import is_jupyter\n",
"from recommenders.datasets.python_splitters import python_chrono_split\n",
"from recommenders.evaluation.python_evaluation import (rmse, mae, rsquared, exp_var, map_at_k, ndcg_at_k, precision_at_k, \n",
" recall_at_k, get_top_k_items)\n",
"from recommenders.evaluation.python_evaluation import (\n",
" map, ndcg_at_k, precision_at_k, recall_at_k\n",
")\n",
"from recommenders.utils.notebook_utils import store_metadata\n",
"\n",
"print(\"System version: {}\".format(sys.version))\n",
@@ -334,7 +334,7 @@
}
],
"source": [
"eval_map = map_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)\n",
"eval_map = map(test, all_predictions, col_prediction='prediction', k=TOP_K)\n",
"eval_ndcg = ndcg_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)\n",
"eval_precision = precision_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)\n",
"eval_recall = recall_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)\n",
1 change: 0 additions & 1 deletion examples/00_quick_start/nrms_MIND.ipynb
@@ -99,7 +99,6 @@
"import numpy as np\n",
"import zipfile\n",
"from tqdm import tqdm\n",
"import scrapbook as sb\n",
"from tempfile import TemporaryDirectory\n",
"import tensorflow as tf\n",
"tf.get_logger().setLevel('ERROR') # only show error messages\n",
1 change: 0 additions & 1 deletion examples/00_quick_start/rlrmc_movielens.ipynb
@@ -40,7 +40,6 @@
"source": [
"import sys\n",
"import time\n",
"import numpy as np\n",
"import pandas as pd\n",
"\n",
"from recommenders.datasets.python_splitters import python_random_split\n",
4 changes: 2 additions & 2 deletions examples/00_quick_start/sar_movielens.ipynb
@@ -68,7 +68,7 @@
"from recommenders.datasets.python_splitters import python_stratified_split\n",
"from recommenders.models.sar import SAR\n",
"from recommenders.evaluation.python_evaluation import (\n",
" map_at_k,\n",
" map,\n",
" ndcg_at_k,\n",
" precision_at_k,\n",
" recall_at_k,\n",
@@ -509,7 +509,7 @@
"outputs": [],
"source": [
"# Ranking metrics\n",
"eval_map = map_at_k(test, top_k, col_user=\"userID\", col_item=\"itemID\", col_rating=\"rating\", k=TOP_K)\n",
"eval_map = map(test, top_k, col_user=\"userID\", col_item=\"itemID\", col_rating=\"rating\", k=TOP_K)\n",
"eval_ndcg = ndcg_at_k(test, top_k, col_user=\"userID\", col_item=\"itemID\", col_rating=\"rating\", k=TOP_K)\n",
"eval_precision = precision_at_k(test, top_k, col_user=\"userID\", col_item=\"itemID\", col_rating=\"rating\", k=TOP_K)\n",
"eval_recall = recall_at_k(test, top_k, col_user=\"userID\", col_item=\"itemID\", col_rating=\"rating\", k=TOP_K)\n"
10 changes: 4 additions & 6 deletions examples/00_quick_start/sasrec_amazon.ipynb
@@ -57,23 +57,21 @@
}
],
"source": [
"import re\n",
"import sys\n",
"import os\n",
"from tempfile import TemporaryDirectory\n",
"import numpy as np\n",
"import pandas as pd \n",
"from collections import defaultdict\n",
"import tensorflow as tf\n",
"tf.get_logger().setLevel('ERROR') # only show error messages\n",
"\n",
"from recommenders.utils.timer import Timer\n",
"from recommenders.datasets.amazon_reviews import get_review_data\n",
"from recommenders.datasets.split_utils import filter_k_core\n",
"from recommenders.models.sasrec.model import SASREC\n",
"from recommenders.models.sasrec.ssept import SSEPT\n",
"from recommenders.models.sasrec.sampler import WarpSampler\n",
"from recommenders.models.sasrec.util import SASRecDataSet\n",
"from recommenders.utils.notebook_utils import store_metadata\n",
"from recommenders.utils.timer import Timer\n",
"\n",
"\n",
"print(f\"System version: {sys.version}\")\n",
"print(f\"Tensorflow version: {tf.__version__}\")"
@@ -98,7 +96,7 @@
"source": [
"num_epochs = 5\n",
"batch_size = 128\n",
"RANDOM_SEED = 100 # Set None for non-deterministic result\n",
"seed = 100 # Set None for non-deterministic result\n",
"\n",
"# data_dir = os.path.join(\"tests\", \"recsys_data\", \"RecSys\", \"SASRec-tf2\", \"data\")\n",
"data_dir = os.path.join(\"..\", \"..\", \"tests\", \"resources\", \"deeprec\", \"sasrec\")\n",
19 changes: 4 additions & 15 deletions examples/00_quick_start/sequential_recsys_amazondataset.ipynb
@@ -63,9 +63,6 @@
"source": [
"import os\n",
"import sys\n",
"import logging\n",
"from tempfile import TemporaryDirectory\n",
"import numpy as np\n",
"import tensorflow.compat.v1 as tf\n",
"tf.get_logger().setLevel('ERROR') # only show error messages\n",
"\n",
@@ -75,7 +72,6 @@
" prepare_hparams\n",
")\n",
"from recommenders.datasets.amazon_reviews import download_and_extract, data_preprocessing\n",
"from recommenders.datasets.download_utils import maybe_download\n",
"from recommenders.models.deeprec.models.sequential.sli_rec import SLI_RECModel as SeqModel\n",
"#### to use the other model, use one of the following lines:\n",
"# from recommenders.models.deeprec.models.sequential.asvd import A2SVDModel as SeqModel\n",
@@ -92,16 +88,6 @@
"\n"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"## ATTENTION: change to the corresponding config file, e.g., caser.yaml for CaserModel, sum.yaml for SUMModel\n",
"yaml_file = '../../recommenders/models/deeprec/config/sli_rec.yaml' "
]
},
{
"cell_type": "markdown",
"metadata": {},
@@ -123,7 +109,10 @@
"BATCH_SIZE = 400\n",
"RANDOM_SEED = SEED # Set None for non-deterministic result\n",
"\n",
"data_path = os.path.join(\"..\", \"..\", \"tests\", \"resources\", \"deeprec\", \"slirec\")"
"data_path = os.path.join(\"..\", \"..\", \"tests\", \"resources\", \"deeprec\", \"slirec\")\n",
"\n",
"## ATTENTION: change to the corresponding config file, e.g., caser.yaml for CaserModel, sum.yaml for SUMModel\n",
"yaml_file = '../../recommenders/models/deeprec/config/sli_rec.yaml' "
]
},
{
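Moving yaml_file into the same cell as the other constants relates to the "Fix notebook parameter parsing" bullet: a notebook executor only rewrites assignments in the designated parameters cell, so a parameter defined in a separate cell would keep its hard-coded value. The sketch below shows the general substitution idea under that assumption; the function is illustrative and is not the library's actual (now private) helper.

import re

def update_parameters(cell_source, new_params):
    """Rewrite simple `name = value` lines for any name present in new_params."""
    lines = []
    for line in cell_source.splitlines():
        match = re.match(r"^(\w+)\s*=", line)
        if match and match.group(1) in new_params:
            # Substitute the injected value, keeping everything else intact.
            lines.append(f"{match.group(1)} = {new_params[match.group(1)]!r}")
        else:
            lines.append(line)
    return "\n".join(lines)

# Both assignments now sit in one cell, so both can be overridden at test time.
cell = "EPOCHS = 10\nyaml_file = '../../recommenders/models/deeprec/config/sli_rec.yaml'"
print(update_parameters(cell, {"EPOCHS": 1}))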
2 changes: 0 additions & 2 deletions examples/00_quick_start/wide_deep_movielens.ipynb
@@ -59,8 +59,6 @@
"import os\n",
"import sys\n",
"import math\n",
"import itertools\n",
"import numpy as np\n",
"import pandas as pd\n",
"import sklearn.preprocessing\n",
"from tempfile import TemporaryDirectory\n",
(changed file, name not shown)
@@ -78,7 +78,7 @@
" mae,\n",
" rsquared,\n",
" exp_var,\n",
" map_at_k,\n",
" map,\n",
" ndcg_at_k,\n",
" precision_at_k,\n",
" recall_at_k,\n",
@@ -689,7 +689,7 @@
"source": [
"cols[\"col_prediction\"] = \"Count\"\n",
"\n",
"eval_map = map_at_k(test, baseline_recommendations, k=TOP_K, **cols)\n",
"eval_map = map(test, baseline_recommendations, k=TOP_K, **cols)\n",
"eval_ndcg = ndcg_at_k(test, baseline_recommendations, k=TOP_K, **cols)\n",
"eval_precision = precision_at_k(test, baseline_recommendations, k=TOP_K, **cols)\n",
"eval_recall = recall_at_k(test, baseline_recommendations, k=TOP_K, **cols)\n",
(changed file, name not shown)
@@ -48,15 +48,14 @@
"import sys\n",
"import torch\n",
"import cornac\n",
"import pandas as pd\n",
"\n",
"from recommenders.datasets import movielens\n",
"from recommenders.datasets.python_splitters import python_random_split\n",
"from recommenders.models.cornac.cornac_utils import predict_ranking\n",
"from recommenders.utils.timer import Timer\n",
"from recommenders.utils.constants import SEED\n",
"from recommenders.evaluation.python_evaluation import (\n",
" map_at_k,\n",
" map,\n",
" ndcg_at_k,\n",
" precision_at_k,\n",
" recall_at_k,\n",
@@ -508,7 +507,7 @@
}
],
"source": [
"eval_map = map_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)\n",
"eval_map = map(test, all_predictions, col_prediction='prediction', k=TOP_K)\n",
"eval_ndcg = ndcg_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)\n",
"eval_precision = precision_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)\n",
"eval_recall = recall_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)\n",
(changed file, name not shown)
@@ -49,7 +49,7 @@
"\n",
"from recommenders.datasets import movielens\n",
"from recommenders.datasets.python_splitters import python_random_split\n",
"from recommenders.evaluation.python_evaluation import map_at_k, ndcg_at_k, precision_at_k, recall_at_k\n",
"from recommenders.evaluation.python_evaluation import map, ndcg_at_k, precision_at_k, recall_at_k\n",
"from recommenders.models.cornac.cornac_utils import predict_ranking\n",
"from recommenders.utils.timer import Timer\n",
"from recommenders.utils.constants import SEED\n",
@@ -557,7 +557,7 @@
],
"source": [
"k = 10\n",
"eval_map = map_at_k(test, all_predictions, col_prediction='prediction', k=k)\n",
"eval_map = map(test, all_predictions, col_prediction='prediction', k=k)\n",
"eval_ndcg = ndcg_at_k(test, all_predictions, col_prediction='prediction', k=k)\n",
"eval_precision = precision_at_k(test, all_predictions, col_prediction='prediction', k=k)\n",
"eval_recall = recall_at_k(test, all_predictions, col_prediction='prediction', k=k)\n",
(changed file, name not shown)
@@ -62,7 +62,7 @@
"from recommenders.models.deeprec.DataModel.ImplicitCF import ImplicitCF\n",
"from recommenders.datasets import movielens\n",
"from recommenders.datasets.python_splitters import python_stratified_split\n",
"from recommenders.evaluation.python_evaluation import map_at_k, ndcg_at_k, precision_at_k, recall_at_k\n",
"from recommenders.evaluation.python_evaluation import map, ndcg_at_k, precision_at_k, recall_at_k\n",
"from recommenders.utils.constants import SEED as DEFAULT_SEED\n",
"from recommenders.models.deeprec.deeprec_utils import prepare_hparams\n",
"from recommenders.utils.notebook_utils import store_metadata\n",
@@ -640,7 +640,7 @@
}
],
"source": [
"eval_map = map_at_k(test, topk_scores, k=TOP_K)\n",
"eval_map = map(test, topk_scores, k=TOP_K)\n",
"eval_ndcg = ndcg_at_k(test, topk_scores, k=TOP_K)\n",
"eval_precision = precision_at_k(test, topk_scores, k=TOP_K)\n",
"eval_recall = recall_at_k(test, topk_scores, k=TOP_K)\n",
21 changes: 10 additions & 11 deletions examples/02_model_collaborative_filtering/ncf_deep_dive.ipynb
@@ -63,8 +63,9 @@
"from recommenders.models.ncf.dataset import Dataset as NCFDataset\n",
"from recommenders.datasets import movielens\n",
"from recommenders.datasets.python_splitters import python_chrono_split\n",
"from recommenders.evaluation.python_evaluation import (rmse, mae, rsquared, exp_var, map_at_k, ndcg_at_k, precision_at_k, \n",
" recall_at_k, get_top_k_items)\n",
"from recommenders.evaluation.python_evaluation import (\n",
" map, ndcg_at_k, precision_at_k, recall_at_k\n",
")\n",
"from recommenders.utils.constants import SEED as DEFAULT_SEED\n",
"from recommenders.utils.notebook_utils import store_metadata\n",
"\n",
@@ -428,7 +429,7 @@
"metadata": {},
"outputs": [],
"source": [
"model = NCF (\n",
"model = NCF(\n",
" n_users=data.n_users, \n",
" n_items=data.n_items,\n",
" model_type=\"NeuMF\",\n",
@@ -625,8 +626,7 @@
}
],
"source": [
"\n",
"eval_map = map_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)\n",
"eval_map = map(test, all_predictions, col_prediction='prediction', k=TOP_K)\n",
"eval_ndcg = ndcg_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)\n",
"eval_precision = precision_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)\n",
"eval_recall = recall_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)\n",
@@ -718,7 +718,7 @@
"metadata": {},
"outputs": [],
"source": [
"model = NCF (\n",
"model = NCF(\n",
" n_users=data.n_users, \n",
" n_items=data.n_items,\n",
" model_type=\"GMF\",\n",
@@ -760,7 +760,7 @@
"metadata": {},
"outputs": [],
"source": [
"model = NCF (\n",
"model = NCF(\n",
" n_users=data.n_users, \n",
" n_items=data.n_items,\n",
" model_type=\"MLP\",\n",
@@ -771,8 +771,7 @@
" learning_rate=1e-3,\n",
" verbose=10,\n",
" seed=SEED\n",
")\n",
"\n"
")"
]
},
{
@@ -811,7 +810,7 @@
"metadata": {},
"outputs": [],
"source": [
"model = NCF (\n",
"model = NCF(\n",
" n_users=data.n_users, \n",
" n_items=data.n_items,\n",
" model_type=\"NeuMF\",\n",
@@ -905,7 +904,7 @@
}
],
"source": [
"eval_map2 = map_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)\n",
"eval_map2 = map(test, all_predictions, col_prediction='prediction', k=TOP_K)\n",
"eval_ndcg2 = ndcg_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)\n",
"eval_precision2 = precision_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)\n",
"eval_recall2 = recall_at_k(test, all_predictions, col_prediction='prediction', k=TOP_K)\n",
6 changes: 3 additions & 3 deletions examples/06_benchmarks/benchmark_utils.py
@@ -37,7 +37,7 @@
)
from recommenders.models.cornac.cornac_utils import predict_ranking
from recommenders.evaluation.python_evaluation import (
map_at_k,
map,
ndcg_at_k,
precision_at_k,
recall_at_k,
@@ -387,7 +387,7 @@ def ranking_metrics_pyspark(test, predictions, k=DEFAULT_K):
test, predictions, k=k, relevancy_method="top_k", **COL_DICT
)
return {
"MAP": rank_eval.map_at_k(),
"MAP": rank_eval.map(),
"nDCG@k": rank_eval.ndcg_at_k(),
"Precision@k": rank_eval.precision_at_k(),
"Recall@k": rank_eval.recall_at_k(),
@@ -405,7 +405,7 @@ def rating_metrics_python(test, predictions):

def ranking_metrics_python(test, predictions, k=DEFAULT_K):
return {
"MAP": map_at_k(test, predictions, k=k, **COL_DICT),
"MAP": map(test, predictions, k=k, **COL_DICT),
"nDCG@k": ndcg_at_k(test, predictions, k=k, **COL_DICT),
"Precision@k": precision_at_k(test, predictions, k=k, **COL_DICT),
"Recall@k": recall_at_k(test, predictions, k=k, **COL_DICT),
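For context, a hypothetical invocation of the updated helper: the dataframes are toy stand-ins, and the import assumes examples/06_benchmarks is on sys.path. With COL_DICT's default column names, the returned dict carries the "MAP", "nDCG@k", "Precision@k", and "Recall@k" keys shown above, with "MAP" now routed through the renamed map() metric.

import pandas as pd
from benchmark_utils import ranking_metrics_python  # assumes examples/06_benchmarks on sys.path

# Toy ground truth and scores (hypothetical data, default column names).
test = pd.DataFrame({"userID": [1, 2], "itemID": [10, 11], "rating": [5.0, 4.0]})
predictions = pd.DataFrame({"userID": [1, 2], "itemID": [10, 11], "prediction": [0.9, 0.8]})

print(ranking_metrics_python(test, predictions, k=10))  # {"MAP": ..., "nDCG@k": ..., ...}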
(Diffs for the remaining 14 changed files are not shown.)
