diff --git a/docs/2cr/msmarco-v2-doc.html b/docs/2cr/msmarco-v2-doc.html
index 5ce3ce4f9..1e9456f6a 100644
--- a/docs/2cr/msmarco-v2-doc.html
+++ b/docs/2cr/msmarco-v2-doc.html
@@ -157,6 +157,7 @@
TREC 2021
TREC 2022
+TREC 2023
dev
dev2
@@ -172,6 +173,10 @@
nDCG@10
R@1K
+
+AP
+nDCG@10
+R@1K
RR@100
R@1K
@@ -190,9 +195,13 @@
0.5116
0.6739
--
--
--
+0.0801
+0.2993
+0.4107
+
+0.1046
+0.2946
+0.5262
0.1572
0.8054
@@ -202,7 +211,7 @@
-
+
@@ -214,10 +223,13 @@
TREC 2022
+
@@ -246,8 +258,48 @@
- Not available.
+ Command to generate run on TREC 2022 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-doc \
+  --topics dl22 \
+  --output run.msmarco-v2-doc.bm25-doc-default.dl22.txt \
+  --bm25
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -M 100 -m map dl22-doc run.msmarco-v2-doc.bm25-doc-default.dl22.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl22-doc run.msmarco-v2-doc.bm25-doc-default.dl22.txt
+python -m pyserini.eval.trec_eval -c -m recall.1000 dl22-doc run.msmarco-v2-doc.bm25-doc-default.dl22.txt
+
+
+
+ Command to generate run on TREC 2023 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-doc \
+  --topics dl23 \
+  --output run.msmarco-v2-doc.bm25-doc-default.dl23.txt \
+  --bm25
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -M 100 -m map dl23-doc run.msmarco-v2-doc.bm25-doc-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl23-doc run.msmarco-v2-doc.bm25-doc-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -m recall.1000 dl23-doc run.msmarco-v2-doc.bm25-doc-default.dl23.txt
+
+
+
+
+
Command to generate run on dev queries:
@@ -267,7 +319,7 @@
-
+
Command to generate run on dev2 queries:
@@ -301,9 +353,13 @@
0.5776
0.6930
--
--
--
+0.1036
+0.3618
+0.4664
+
+0.1341
+0.3405
+0.5662
0.1896
0.8542
@@ -313,7 +369,7 @@
-
+
@@ -325,10 +381,13 @@
TREC 2022
+
@@ -357,8 +416,48 @@
- Not available.
+ Command to generate run on TREC 2022 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-doc-segmented \
+  --topics dl22 \
+  --output run.msmarco-v2-doc.bm25-doc-segmented-default.dl22.txt \
+  --bm25 --hits 10000 --max-passage-hits 1000 --max-passage
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -M 100 -m map dl22-doc run.msmarco-v2-doc.bm25-doc-segmented-default.dl22.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl22-doc run.msmarco-v2-doc.bm25-doc-segmented-default.dl22.txt
+python -m pyserini.eval.trec_eval -c -m recall.1000 dl22-doc run.msmarco-v2-doc.bm25-doc-segmented-default.dl22.txt
+
+
+
+ Command to generate run on TREC 2023 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-doc-segmented \
+  --topics dl23 \
+  --output run.msmarco-v2-doc.bm25-doc-segmented-default.dl23.txt \
+  --bm25 --hits 10000 --max-passage-hits 1000 --max-passage
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -M 100 -m map dl23-doc run.msmarco-v2-doc.bm25-doc-segmented-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl23-doc run.msmarco-v2-doc.bm25-doc-segmented-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -m recall.1000 dl23-doc run.msmarco-v2-doc.bm25-doc-segmented-default.dl23.txt
+
+
+
+
+
Command to generate run on dev queries:
@@ -378,7 +477,7 @@
-
+
Command to generate run on dev2 queries:
@@ -412,9 +511,13 @@
0.5304
0.7341
--
--
--
+0.0798
+0.2536
+0.4217
+
+0.1174
+0.2462
+0.5232
0.0974
0.7699
@@ -424,7 +527,7 @@
-
+
@@ -436,10 +539,13 @@
TREC 2022
+
@@ -468,8 +574,48 @@
- Not available.
+ Command to generate run on TREC 2022 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-doc \
+  --topics dl22 \
+  --output run.msmarco-v2-doc.bm25-rm3-doc-default.dl22.txt \
+  --bm25 --rm3
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -M 100 -m map dl22-doc run.msmarco-v2-doc.bm25-rm3-doc-default.dl22.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl22-doc run.msmarco-v2-doc.bm25-rm3-doc-default.dl22.txt
+python -m pyserini.eval.trec_eval -c -m recall.1000 dl22-doc run.msmarco-v2-doc.bm25-rm3-doc-default.dl22.txt
+
+
+
+ Command to generate run on TREC 2023 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-doc \
+  --topics dl23 \
+  --output run.msmarco-v2-doc.bm25-rm3-doc-default.dl23.txt \
+  --bm25 --rm3
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -M 100 -m map dl23-doc run.msmarco-v2-doc.bm25-rm3-doc-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl23-doc run.msmarco-v2-doc.bm25-rm3-doc-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -m recall.1000 dl23-doc run.msmarco-v2-doc.bm25-rm3-doc-default.dl23.txt
+
+
+
+
+
Command to generate run on dev queries:
@@ -489,7 +635,7 @@
-
+
Command to generate run on dev2 queries:
@@ -523,9 +669,13 @@
0.6189
0.7678
--
--
--
+0.1260
+0.3834
+0.5114
+
+0.1652
+0.3452
+0.5755
0.1660
0.8608
@@ -535,7 +685,7 @@
-
+
@@ -547,10 +697,13 @@
TREC 2022
+
@@ -579,8 +732,48 @@
- Not available.
+ Command to generate run on TREC 2022 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-doc-segmented \
+  --topics dl22 \
+  --output run.msmarco-v2-doc.bm25-rm3-doc-segmented-default.dl22.txt \
+  --bm25 --rm3 --hits 10000 --max-passage-hits 1000 --max-passage
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -M 100 -m map dl22-doc run.msmarco-v2-doc.bm25-rm3-doc-segmented-default.dl22.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl22-doc run.msmarco-v2-doc.bm25-rm3-doc-segmented-default.dl22.txt
+python -m pyserini.eval.trec_eval -c -m recall.1000 dl22-doc run.msmarco-v2-doc.bm25-rm3-doc-segmented-default.dl22.txt
+
+
+
+ Command to generate run on TREC 2023 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-doc-segmented \
+  --topics dl23 \
+  --output run.msmarco-v2-doc.bm25-rm3-doc-segmented-default.dl23.txt \
+  --bm25 --rm3 --hits 10000 --max-passage-hits 1000 --max-passage
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -M 100 -m map dl23-doc run.msmarco-v2-doc.bm25-rm3-doc-segmented-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl23-doc run.msmarco-v2-doc.bm25-rm3-doc-segmented-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -m recall.1000 dl23-doc run.msmarco-v2-doc.bm25-rm3-doc-segmented-default.dl23.txt
+
+
+
+
+
Command to generate run on dev queries:
@@ -600,7 +793,7 @@
-
+
Command to generate run on dev2 queries:
@@ -635,9 +828,13 @@
0.5792
0.7066
--
--
--
+0.0977
+0.3539
+0.4301
+
+0.1273
+0.3511
+0.5549
0.2011
0.8614
@@ -647,7 +844,7 @@
-
+
@@ -659,10 +856,13 @@
TREC 2022
+
@@ -691,8 +891,48 @@
- Not available.
+ Command to generate run on TREC 2022 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-doc-d2q-t5 \
+  --topics dl22 \
+  --output run.msmarco-v2-doc.bm25-d2q-t5-doc-default.dl22.txt \
+  --bm25
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -M 100 -m map dl22-doc run.msmarco-v2-doc.bm25-d2q-t5-doc-default.dl22.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl22-doc run.msmarco-v2-doc.bm25-d2q-t5-doc-default.dl22.txt
+python -m pyserini.eval.trec_eval -c -m recall.1000 dl22-doc run.msmarco-v2-doc.bm25-d2q-t5-doc-default.dl22.txt
+
+
+
+ Command to generate run on TREC 2023 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-doc-d2q-t5 \
+  --topics dl23 \
+  --output run.msmarco-v2-doc.bm25-d2q-t5-doc-default.dl23.txt \
+  --bm25
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -M 100 -m map dl23-doc run.msmarco-v2-doc.bm25-d2q-t5-doc-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl23-doc run.msmarco-v2-doc.bm25-d2q-t5-doc-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -m recall.1000 dl23-doc run.msmarco-v2-doc.bm25-d2q-t5-doc-default.dl23.txt
+
+
+
+
+
Command to generate run on dev queries:
@@ -712,7 +952,7 @@
-
+
Command to generate run on dev2 queries:
@@ -746,9 +986,13 @@
0.6289
0.7202
--
--
--
+0.1203
+0.3975
+0.4984
+
+0.1460
+0.3612
+0.5967
0.2226
0.8982
@@ -758,7 +1002,7 @@
-
+
@@ -770,10 +1014,13 @@
TREC 2022
+
@@ -802,8 +1049,48 @@
- Not available.
+ Command to generate run on TREC 2022 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-doc-segmented-d2q-t5 \
+  --topics dl22 \
+  --output run.msmarco-v2-doc.bm25-d2q-t5-doc-segmented-default.dl22.txt \
+  --bm25 --hits 10000 --max-passage-hits 1000 --max-passage
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -M 100 -m map dl22-doc run.msmarco-v2-doc.bm25-d2q-t5-doc-segmented-default.dl22.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl22-doc run.msmarco-v2-doc.bm25-d2q-t5-doc-segmented-default.dl22.txt
+python -m pyserini.eval.trec_eval -c -m recall.1000 dl22-doc run.msmarco-v2-doc.bm25-d2q-t5-doc-segmented-default.dl22.txt
+
+
+
+ Command to generate run on TREC 2023 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-doc-segmented-d2q-t5 \
+  --topics dl23 \
+  --output run.msmarco-v2-doc.bm25-d2q-t5-doc-segmented-default.dl23.txt \
+  --bm25 --hits 10000 --max-passage-hits 1000 --max-passage
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -M 100 -m map dl23-doc run.msmarco-v2-doc.bm25-d2q-t5-doc-segmented-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl23-doc run.msmarco-v2-doc.bm25-d2q-t5-doc-segmented-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -m recall.1000 dl23-doc run.msmarco-v2-doc.bm25-d2q-t5-doc-segmented-default.dl23.txt
+
+
+
+
+
Command to generate run on dev queries:
@@ -823,7 +1110,7 @@
-
+
Command to generate run on dev2 queries:
@@ -857,9 +1144,13 @@
0.5375
0.7574
--
--
--
+0.0904
+0.2758
+0.4263
+
+0.1246
+0.2681
+0.5616
0.1141
0.8191
@@ -869,7 +1160,7 @@
-
+
@@ -881,10 +1172,13 @@
TREC 2022
+
@@ -913,8 +1207,48 @@
- Not available.
+ Command to generate run on TREC 2022 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-doc-d2q-t5-docvectors \
+  --topics dl22 \
+  --output run.msmarco-v2-doc.bm25-rm3-d2q-t5-doc-default.dl22.txt \
+  --bm25 --rm3
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -M 100 -m map dl22-doc run.msmarco-v2-doc.bm25-rm3-d2q-t5-doc-default.dl22.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl22-doc run.msmarco-v2-doc.bm25-rm3-d2q-t5-doc-default.dl22.txt
+python -m pyserini.eval.trec_eval -c -m recall.1000 dl22-doc run.msmarco-v2-doc.bm25-rm3-d2q-t5-doc-default.dl22.txt
+
+
+
+ Command to generate run on TREC 2023 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-doc-d2q-t5-docvectors \
+  --topics dl23 \
+  --output run.msmarco-v2-doc.bm25-rm3-d2q-t5-doc-default.dl23.txt \
+  --bm25 --rm3
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -M 100 -m map dl23-doc run.msmarco-v2-doc.bm25-rm3-d2q-t5-doc-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl23-doc run.msmarco-v2-doc.bm25-rm3-d2q-t5-doc-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -m recall.1000 dl23-doc run.msmarco-v2-doc.bm25-rm3-d2q-t5-doc-default.dl23.txt
+
+
+
+
+
Command to generate run on dev queries:
@@ -934,7 +1268,7 @@
-
+
Command to generate run on dev2 queries:
@@ -968,9 +1302,13 @@
0.6559
0.7948
--
--
--
+0.1319
+0.3912
+0.5188
+
+0.1699
+0.3454
+0.6006
0.1975
0.9002
@@ -980,7 +1318,7 @@
-
+
@@ -992,10 +1330,13 @@
TREC 2022
+
@@ -1024,8 +1365,48 @@
- Not available.
+ Command to generate run on TREC 2022 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-doc-segmented-d2q-t5-docvectors \
+  --topics dl22 \
+  --output run.msmarco-v2-doc.bm25-rm3-d2q-t5-doc-segmented-default.dl22.txt \
+  --bm25 --rm3 --hits 10000 --max-passage-hits 1000 --max-passage
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -M 100 -m map dl22-doc run.msmarco-v2-doc.bm25-rm3-d2q-t5-doc-segmented-default.dl22.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl22-doc run.msmarco-v2-doc.bm25-rm3-d2q-t5-doc-segmented-default.dl22.txt
+python -m pyserini.eval.trec_eval -c -m recall.1000 dl22-doc run.msmarco-v2-doc.bm25-rm3-d2q-t5-doc-segmented-default.dl22.txt
+
+
+
+ Command to generate run on TREC 2023 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-doc-segmented-d2q-t5-docvectors \
+  --topics dl23 \
+  --output run.msmarco-v2-doc.bm25-rm3-d2q-t5-doc-segmented-default.dl23.txt \
+  --bm25 --rm3 --hits 10000 --max-passage-hits 1000 --max-passage
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -M 100 -m map dl23-doc run.msmarco-v2-doc.bm25-rm3-d2q-t5-doc-segmented-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl23-doc run.msmarco-v2-doc.bm25-rm3-d2q-t5-doc-segmented-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -m recall.1000 dl23-doc run.msmarco-v2-doc.bm25-rm3-d2q-t5-doc-segmented-default.dl23.txt
+
+
+
+
+
Command to generate run on dev queries:
@@ -1045,7 +1426,7 @@
-
+
Command to generate run on dev2 queries:
@@ -1080,6 +1461,10 @@
0.6495
0.6787
+0.1180
+0.4165
+0.4779
+
 -
 -
 -
@@ -1092,7 +1477,7 @@
-
+
@@ -1104,10 +1489,13 @@
TREC 2022
+
@@ -1136,8 +1524,29 @@
- Not available.
+ Command to generate run on TREC 2022 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-doc-segmented-unicoil-noexp-0shot \
+  --topics dl22-unicoil-noexp \
+  --output run.msmarco-v2-doc.unicoil-noexp.dl22.txt \
+  --impact --hits 10000 --max-passage-hits 1000 --max-passage
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -M 100 -m map dl22-doc run.msmarco-v2-doc.unicoil-noexp.dl22.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl22-doc run.msmarco-v2-doc.unicoil-noexp.dl22.txt
+python -m pyserini.eval.trec_eval -c -m recall.1000 dl22-doc run.msmarco-v2-doc.unicoil-noexp.dl22.txt
+
+
+
+
+ Not available.
+
Command to generate run on dev queries:
@@ -1157,7 +1566,7 @@
-
+
Command to generate run on dev2 queries:
@@ -1191,6 +1600,10 @@
0.6783
0.7069
+0.1400
+0.4451
+0.5235
+
 -
 -
 -
@@ -1203,7 +1616,7 @@
-
+
@@ -1215,10 +1628,13 @@
TREC 2022
+
@@ -1247,8 +1663,29 @@
- Not available.
+ Command to generate run on TREC 2022 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-doc-segmented-unicoil-0shot \
+  --topics dl22-unicoil \
+  --output run.msmarco-v2-doc.unicoil.dl22.txt \
+  --impact --hits 10000 --max-passage-hits 1000 --max-passage
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -M 100 -m map dl22-doc run.msmarco-v2-doc.unicoil.dl22.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl22-doc run.msmarco-v2-doc.unicoil.dl22.txt
+python -m pyserini.eval.trec_eval -c -m recall.1000 dl22-doc run.msmarco-v2-doc.unicoil.dl22.txt
+
+
+
+
+ Not available.
+
Command to generate run on dev queries:
@@ -1268,7 +1705,7 @@
-
+
Command to generate run on dev2 queries:
@@ -1303,9 +1740,13 @@
0.6495
0.6787
--
--
--
+0.1180
+0.4165
+0.4779
+
+0.1413
+0.3898
+0.5462
0.2231
0.8987
@@ -1315,7 +1756,7 @@
-
+
@@ -1327,10 +1768,13 @@
TREC 2022
+
@@ -1360,8 +1804,50 @@
- Not available.
+ Command to generate run on TREC 2022 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-doc-segmented-unicoil-noexp-0shot \
+  --topics dl22 \
+  --encoder castorini/unicoil-noexp-msmarco-passage \
+  --output run.msmarco-v2-doc.unicoil-noexp-otf.dl22.txt \
+  --impact --hits 10000 --max-passage-hits 1000 --max-passage
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -M 100 -m map dl22-doc run.msmarco-v2-doc.unicoil-noexp-otf.dl22.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl22-doc run.msmarco-v2-doc.unicoil-noexp-otf.dl22.txt
+python -m pyserini.eval.trec_eval -c -m recall.1000 dl22-doc run.msmarco-v2-doc.unicoil-noexp-otf.dl22.txt
+
+
+
+ Command to generate run on TREC 2023 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-doc-segmented-unicoil-noexp-0shot \
+  --topics dl23 \
+  --encoder castorini/unicoil-noexp-msmarco-passage \
+  --output run.msmarco-v2-doc.unicoil-noexp-otf.dl23.txt \
+  --impact --hits 10000 --max-passage-hits 1000 --max-passage
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -M 100 -m map dl23-doc run.msmarco-v2-doc.unicoil-noexp-otf.dl23.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl23-doc run.msmarco-v2-doc.unicoil-noexp-otf.dl23.txt
+python -m pyserini.eval.trec_eval -c -m recall.1000 dl23-doc run.msmarco-v2-doc.unicoil-noexp-otf.dl23.txt
+
+
+
+
+
Command to generate run on dev queries:
@@ -1382,7 +1868,7 @@
-
+
Command to generate run on dev2 queries:
@@ -1417,9 +1903,13 @@
0.6783
0.7069
--
--
--
+0.1400
+0.4451
+0.5235
+
+0.1554
+0.4150
+0.5753
0.2419
0.9122
@@ -1429,7 +1919,7 @@
-
+
@@ -1441,10 +1931,13 @@
TREC 2022
+
@@ -1474,8 +1967,50 @@
- Not available.
+ Command to generate run on TREC 2022 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-doc-segmented-unicoil-0shot \
+  --topics dl22 \
+  --encoder castorini/unicoil-msmarco-passage \
+  --output run.msmarco-v2-doc.unicoil-otf.dl22.txt \
+  --impact --hits 10000 --max-passage-hits 1000 --max-passage
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -M 100 -m map dl22-doc run.msmarco-v2-doc.unicoil-otf.dl22.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl22-doc run.msmarco-v2-doc.unicoil-otf.dl22.txt
+python -m pyserini.eval.trec_eval -c -m recall.1000 dl22-doc run.msmarco-v2-doc.unicoil-otf.dl22.txt
+
+
+
+ Command to generate run on TREC 2023 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-doc-segmented-unicoil-0shot \
+  --topics dl23 \
+  --encoder castorini/unicoil-msmarco-passage \
+  --output run.msmarco-v2-doc.unicoil-otf.dl23.txt \
+  --impact --hits 10000 --max-passage-hits 1000 --max-passage
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -M 100 -m map dl23-doc run.msmarco-v2-doc.unicoil-otf.dl23.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl23-doc run.msmarco-v2-doc.unicoil-otf.dl23.txt
+python -m pyserini.eval.trec_eval -c -m recall.1000 dl23-doc run.msmarco-v2-doc.unicoil-otf.dl23.txt
+
+
+
+
+
Command to generate run on dev queries:
@@ -1496,7 +2031,7 @@
-
+
Command to generate run on dev2 queries:
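
Aside: the CLI invocations throughout this patch have a Python-API equivalent in Pyserini. The sketch below is illustrative only and is not part of the diff; it assumes the prebuilt index name taken from the --index flags above and the k1=0.9, b=0.4 defaults named in the row labels.

from pyserini.search.lucene import LuceneSearcher

# Prebuilt index name taken from the --index flag in the commands above.
searcher = LuceneSearcher.from_prebuilt_index('msmarco-v2-doc')
searcher.set_bm25(k1=0.9, b=0.4)  # the "default" BM25 parameters in the row labels

# Retrieve the top 1000 documents for one query, as the CLI does per topic.
hits = searcher.search('how do telescopes work', k=1000)
for i, hit in enumerate(hits[:10]):
    print(f'{i + 1:3} {hit.docid:30} {hit.score:.4f}')

For the doc-segmented runs, the CLI's --max-passage --max-passage-hits 1000 flags additionally collapse segment-level hits into one score per document; this minimal sketch does not replicate that step.
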
diff --git a/docs/2cr/msmarco-v2-passage.html b/docs/2cr/msmarco-v2-passage.html
index 0155d1bbc..65f176dfe 100644
--- a/docs/2cr/msmarco-v2-passage.html
+++ b/docs/2cr/msmarco-v2-passage.html
@@ -157,6 +157,7 @@
TREC 2021
TREC 2022
+TREC 2023
dev
dev2
@@ -172,6 +173,10 @@
nDCG@10
R@1K
+
+AP
+nDCG@10
+R@1K
RR@100
R@1K
@@ -194,6 +199,10 @@
0.2692
0.3321
+0.0793
+0.2627
+0.4346
+
0.0719
0.5733
@@ -202,7 +211,7 @@
-
+
@@ -214,10 +223,13 @@
TREC 2022
+
@@ -267,6 +279,27 @@
+ Command to generate run on TREC 2023 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-passage \
+  --topics dl23 \
+  --output run.msmarco-v2-passage.bm25-default.dl23.txt \
+  --bm25
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -l 2 -M 100 -m map dl23-passage run.msmarco-v2-passage.bm25-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl23-passage run.msmarco-v2-passage.bm25-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -l 2 -m recall.1000 dl23-passage run.msmarco-v2-passage.bm25-default.dl23.txt
+
+
+
+
+
Command to generate run on dev queries:
@@ -286,7 +319,7 @@
-
+
Command to generate run on dev2 queries:
@@ -324,6 +357,10 @@
0.2742
0.3666
+0.0751
+0.2061
+0.4514
+
0.0872
0.6925
@@ -332,7 +369,7 @@
-
+
@@ -344,10 +381,13 @@
TREC 2022
+
@@ -397,6 +437,27 @@
+ Command to generate run on TREC 2023 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-passage-augmented \
+  --topics dl23 \
+  --output run.msmarco-v2-passage.bm25-augmented-default.dl23.txt \
+  --bm25
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -l 2 -M 100 -m map dl23-passage run.msmarco-v2-passage.bm25-augmented-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl23-passage run.msmarco-v2-passage.bm25-augmented-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -l 2 -m recall.1000 dl23-passage run.msmarco-v2-passage.bm25-augmented-default.dl23.txt
+
+
+
+
+
Command to generate run on dev queries:
@@ -416,7 +477,7 @@
-
+
Command to generate run on dev2 queries:
@@ -454,6 +515,10 @@
0.2686
0.3559
+0.0806
+0.2602
+0.4748
+
0.0630
0.5947
@@ -462,7 +527,7 @@
-
+
@@ -474,10 +539,13 @@
TREC 2022
+
@@ -527,6 +595,27 @@
+ Command to generate run on TREC 2023 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-passage \
+  --topics dl23 \
+  --output run.msmarco-v2-passage.bm25-rm3-default.dl23.txt \
+  --bm25 --rm3
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -l 2 -M 100 -m map dl23-passage run.msmarco-v2-passage.bm25-rm3-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl23-passage run.msmarco-v2-passage.bm25-rm3-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -l 2 -m recall.1000 dl23-passage run.msmarco-v2-passage.bm25-rm3-default.dl23.txt
+
+
+
+
+
Command to generate run on dev queries:
@@ -546,7 +635,7 @@
-
+
Command to generate run on dev2 queries:
@@ -584,6 +673,10 @@
0.2571
0.3441
+0.0646
+0.1798
+0.4583
+
0.0667
0.6857
@@ -592,7 +685,7 @@
-
+
@@ -604,10 +697,13 @@
TREC 2022
+
@@ -657,6 +753,27 @@
+ Command to generate run on TREC 2023 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-passage-augmented \
+  --topics dl23 \
+  --output run.msmarco-v2-passage.bm25-rm3-augmented-default.dl23.txt \
+  --bm25 --rm3
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -l 2 -M 100 -m map dl23-passage run.msmarco-v2-passage.bm25-rm3-augmented-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl23-passage run.msmarco-v2-passage.bm25-rm3-augmented-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -l 2 -m recall.1000 dl23-passage run.msmarco-v2-passage.bm25-rm3-augmented-default.dl23.txt
+
+
+
+
+
Command to generate run on dev queries:
@@ -676,7 +793,7 @@
-
+
Command to generate run on dev2 queries:
@@ -715,6 +832,10 @@
0.3599
0.4632
+0.1085
+0.3156
+0.5608
+
0.1072
0.7083
@@ -723,7 +844,7 @@
-
+
@@ -735,10 +856,13 @@
TREC 2022
+
@@ -788,6 +912,27 @@
+ Command to generate run on TREC 2023 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-passage-d2q-t5 \
+  --topics dl23 \
+  --output run.msmarco-v2-passage.bm25-d2q-t5-default.dl23.txt \
+  --bm25
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -l 2 -M 100 -m map dl23-passage run.msmarco-v2-passage.bm25-d2q-t5-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl23-passage run.msmarco-v2-passage.bm25-d2q-t5-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -l 2 -m recall.1000 dl23-passage run.msmarco-v2-passage.bm25-d2q-t5-default.dl23.txt
+
+
+
+
+
Command to generate run on dev queries:
@@ -807,7 +952,7 @@
-
+
Command to generate run on dev2 queries:
@@ -845,6 +990,10 @@
0.3609
0.4739
+0.1078
+0.2926
+0.5689
+
0.1172
0.7647
@@ -853,7 +1002,7 @@
-
+
@@ -865,10 +1014,13 @@
TREC 2022
+
@@ -918,6 +1070,27 @@
+ Command to generate run on TREC 2023 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-passage-augmented-d2q-t5 \
+  --topics dl23 \
+  --output run.msmarco-v2-passage.bm25-d2q-t5-augmented-default.dl23.txt \
+  --bm25
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -l 2 -M 100 -m map dl23-passage run.msmarco-v2-passage.bm25-d2q-t5-augmented-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl23-passage run.msmarco-v2-passage.bm25-d2q-t5-augmented-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -l 2 -m recall.1000 dl23-passage run.msmarco-v2-passage.bm25-d2q-t5-augmented-default.dl23.txt
+
+
+
+
+
Command to generate run on dev queries:
@@ -937,7 +1110,7 @@
-
+
Command to generate run on dev2 queries:
@@ -975,6 +1148,10 @@
0.3721
0.4964
+0.1135
+0.3132
+0.5978
+
0.0947
0.7181
@@ -983,7 +1160,7 @@
-
+
@@ -995,10 +1172,13 @@
TREC 2022
+
@@ -1048,6 +1228,27 @@
+ Command to generate run on TREC 2023 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-passage-d2q-t5-docvectors \
+  --topics dl23 \
+  --output run.msmarco-v2-passage.bm25-rm3-d2q-t5-default.dl23.txt \
+  --bm25 --rm3
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -l 2 -M 100 -m map dl23-passage run.msmarco-v2-passage.bm25-rm3-d2q-t5-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl23-passage run.msmarco-v2-passage.bm25-rm3-d2q-t5-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -l 2 -m recall.1000 dl23-passage run.msmarco-v2-passage.bm25-rm3-d2q-t5-default.dl23.txt
+
+
+
+
+
Command to generate run on dev queries:
@@ -1067,7 +1268,7 @@
-
+
Command to generate run on dev2 queries:
@@ -1105,6 +1306,10 @@
0.3749
0.4914
+0.1059
+0.2719
+0.5623
+
0.0883
0.7607
@@ -1113,7 +1318,7 @@
-
+
@@ -1125,10 +1330,13 @@
TREC 2022
+
@@ -1178,6 +1386,27 @@
+ Command to generate run on TREC 2023 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-passage-augmented-d2q-t5-docvectors \
+  --topics dl23 \
+  --output run.msmarco-v2-passage.bm25-rm3-d2q-t5-augmented-default.dl23.txt \
+  --bm25 --rm3
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -l 2 -M 100 -m map dl23-passage run.msmarco-v2-passage.bm25-rm3-d2q-t5-augmented-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl23-passage run.msmarco-v2-passage.bm25-rm3-d2q-t5-augmented-default.dl23.txt
+python -m pyserini.eval.trec_eval -c -l 2 -m recall.1000 dl23-passage run.msmarco-v2-passage.bm25-rm3-d2q-t5-augmented-default.dl23.txt
+
+
+
+
+
Command to generate run on dev queries:
@@ -1197,7 +1426,7 @@
-
+
Command to generate run on dev2 queries:
@@ -1236,6 +1465,10 @@
0.4077
0.4423
+ -
+ -
+ -
+
0.1342
0.7010
@@ -1244,7 +1477,7 @@
-
+
@@ -1256,10 +1489,13 @@
TREC 2022
+
@@ -1309,6 +1545,8 @@
+ Not available.
+
Command to generate run on dev queries:
@@ -1328,7 +1566,7 @@
-
+
Command to generate run on dev2 queries:
@@ -1366,6 +1604,10 @@
0.4614
0.5253
+ -
+ -
+ -
+
0.1499
0.7616
@@ -1374,7 +1616,7 @@
-
+
@@ -1386,10 +1628,13 @@
TREC 2022
+
@@ -1439,6 +1684,8 @@
+ Not available.
+
Command to generate run on dev queries:
@@ -1458,7 +1705,7 @@
-
+
Command to generate run on dev2 queries:
@@ -1497,6 +1744,10 @@
0.4077
0.4423
+0.1112
+0.3262
+0.5070
+
0.1342
0.7010
@@ -1505,7 +1756,7 @@
-
+
@@ -1517,10 +1768,13 @@
TREC 2022
+
@@ -1572,6 +1826,28 @@
+ Command to generate run on TREC 2023 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-passage-unicoil-noexp-0shot \
+  --topics dl23 \
+  --encoder castorini/unicoil-noexp-msmarco-passage \
+  --output run.msmarco-v2-passage.unicoil-noexp-otf.dl23.txt \
+  --hits 1000 --impact
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -l 2 -M 100 -m map dl23-passage run.msmarco-v2-passage.unicoil-noexp-otf.dl23.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl23-passage run.msmarco-v2-passage.unicoil-noexp-otf.dl23.txt
+python -m pyserini.eval.trec_eval -c -l 2 -m recall.1000 dl23-passage run.msmarco-v2-passage.unicoil-noexp-otf.dl23.txt
+
+
+
+
+
Command to generate run on dev queries:
@@ -1592,7 +1868,7 @@
-
+
Command to generate run on dev2 queries:
@@ -1631,6 +1907,10 @@
0.4614
0.5253
+0.1437
+0.3855
+0.5541
+
0.1499
0.7616
@@ -1639,7 +1919,7 @@
-
+
@@ -1651,10 +1931,13 @@
TREC 2022
+
@@ -1706,6 +1989,28 @@
+ Command to generate run on TREC 2023 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-passage-unicoil-0shot \
+  --topics dl23 \
+  --encoder castorini/unicoil-msmarco-passage \
+  --output run.msmarco-v2-passage.unicoil-otf.dl23.txt \
+  --hits 1000 --impact
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -l 2 -M 100 -m map dl23-passage run.msmarco-v2-passage.unicoil-otf.dl23.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl23-passage run.msmarco-v2-passage.unicoil-otf.dl23.txt
+python -m pyserini.eval.trec_eval -c -l 2 -m recall.1000 dl23-passage run.msmarco-v2-passage.unicoil-otf.dl23.txt
+
+
+
+
+
Command to generate run on dev queries:
@@ -1726,7 +2031,7 @@
-
+
Command to generate run on dev2 queries:
@@ -1765,6 +2070,10 @@
0.5017
0.6159
+0.1772
+0.4746
+0.6412
+
0.1915
0.8710
@@ -1773,7 +2082,7 @@
-
+
@@ -1785,10 +2094,13 @@
TREC 2022
+
@@ -1840,6 +2152,28 @@
+ Command to generate run on TREC 2023 queries:
+
+python -m pyserini.search.lucene \
+  --threads 16 --batch-size 128 \
+  --index msmarco-v2-passage-slimr-pp-norefine-0shot \
+  --topics dl23 \
+  --encoder castorini/slimr-pp-msmarco-passage \
+  --output run.msmarco-v2-passage.slimr-pp.dl23.txt \
+  --hits 1000 --impact --min-idf 1
+
+Evaluation commands:
+
+python -m pyserini.eval.trec_eval -c -l 2 -M 100 -m map dl23-passage run.msmarco-v2-passage.slimr-pp.dl23.txt
+python -m pyserini.eval.trec_eval -c -m ndcg_cut.10 dl23-passage run.msmarco-v2-passage.slimr-pp.dl23.txt
+python -m pyserini.eval.trec_eval -c -l 2 -m recall.1000 dl23-passage run.msmarco-v2-passage.slimr-pp.dl23.txt
+
+
+
+
+
Command to generate run on dev queries:
@@ -1860,7 +2194,7 @@
-
+
Command to generate run on dev2 queries:
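
Note on the evaluation flags used above: the passage-side commands pass -l 2 to trec_eval for MAP and recall (only judgments of grade 2 or higher count as relevant on the DL passage qrels), while the document-side commands do not. The hedged Python sketch below shows how such a metric-to-flags mapping expands into full commands; the helper function is illustrative, not Pyserini code, though the flag strings mirror the dl23 entries the msmarco.py diff below adds.

metric_flags = {
    'dl23-passage': {
        'MAP@100': '-c -l 2 -M 100 -m map',   # -l 2: only grade >= 2 counts as relevant
        'nDCG@10': '-c -m ndcg_cut.10',       # graded metric, no relevance cutoff
        'R@1K': '-c -l 2 -m recall.1000',
    },
    'dl23-doc': {
        'MAP@100': '-c -M 100 -m map',        # doc qrels use the default threshold
        'nDCG@10': '-c -m ndcg_cut.10',
        'R@1K': '-c -m recall.1000',
    },
}

def eval_command(eval_key: str, metric: str, run_file: str) -> str:
    # Assemble the same invocation shape used throughout this diff.
    flags = metric_flags[eval_key][metric]
    return f'python -m pyserini.eval.trec_eval {flags} {eval_key} {run_file}'

print(eval_command('dl23-passage', 'MAP@100', 'run.msmarco-v2-passage.bm25-default.dl23.txt'))
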
diff --git a/pyserini/2cr/msmarco-v2-doc.yaml b/pyserini/2cr/msmarco-v2-doc.yaml
index 9d54a1f21..4364324e4 100644
--- a/pyserini/2cr/msmarco-v2-doc.yaml
+++ b/pyserini/2cr/msmarco-v2-doc.yaml
@@ -24,9 +24,15 @@ conditions:
      - topic_key: dl22
        eval_key: dl22-doc
        scores:
-          - MAP@100: 0
-            nDCG@10: 0
-            R@1K: 0
+          - MAP@100: 0.0801
+            nDCG@10: 0.2993
+            R@1K: 0.4107
+      - topic_key: dl23
+        eval_key: dl23-doc
+        scores:
+          - MAP@100: 0.1046
+            nDCG@10: 0.2946
+            R@1K: 0.5262
   - name: bm25-doc-segmented-default
     display: BM25 doc segmented (k1=0.9, b=0.4)
     display-html: BM25 doc segmented (k1=0.9, b=0.4)
@@ -52,9 +58,15 @@ conditions:
      - topic_key: dl22
        eval_key: dl22-doc
        scores:
-          - MAP@100: 0
-            nDCG@10: 0
-            R@1K: 0
+          - MAP@100: 0.1036
+            nDCG@10: 0.3618
+            R@1K: 0.4664
+      - topic_key: dl23
+        eval_key: dl23-doc
+        scores:
+          - MAP@100: 0.1341
+            nDCG@10: 0.3405
+            R@1K: 0.5662
   - name: bm25-rm3-doc-default
     display: BM25+RM3 doc (k1=0.9, b=0.4)
     display-html: BM25+RM3 doc (k1=0.9, b=0.4)
@@ -80,9 +92,15 @@ conditions:
      - topic_key: dl22
        eval_key: dl22-doc
        scores:
-          - MAP@100: 0
-            nDCG@10: 0
-            R@1K: 0
+          - MAP@100: 0.0798
+            nDCG@10: 0.2536
+            R@1K: 0.4217
+      - topic_key: dl23
+        eval_key: dl23-doc
+        scores:
+          - MAP@100: 0.1174
+            nDCG@10: 0.2462
+            R@1K: 0.5232
   - name: bm25-rm3-doc-segmented-default
     display: BM25+RM3 doc segmented (k1=0.9, b=0.4)
     display-html: BM25+RM3 doc segmented (k1=0.9, b=0.4)
@@ -108,9 +126,15 @@ conditions:
      - topic_key: dl22
        eval_key: dl22-doc
        scores:
-          - MAP@100: 0
-            nDCG@10: 0
-            R@1K: 0
+          - MAP@100: 0.1260
+            nDCG@10: 0.3834
+            R@1K: 0.5114
+      - topic_key: dl23
+        eval_key: dl23-doc
+        scores:
+          - MAP@100: 0.1652
+            nDCG@10: 0.3452
+            R@1K: 0.5755
   - name: bm25-d2q-t5-doc-default
     display: BM25 w/ doc2query-T5 doc (k1=0.9, b=0.4)
     display-html: BM25 w/ doc2query-T5 doc (k1=0.9, b=0.4)
@@ -136,9 +160,15 @@ conditions:
      - topic_key: dl22
        eval_key: dl22-doc
        scores:
-          - MAP@100: 0
-            nDCG@10: 0
-            R@1K: 0
+          - MAP@100: 0.0977
+            nDCG@10: 0.3539
+            R@1K: 0.4301
+      - topic_key: dl23
+        eval_key: dl23-doc
+        scores:
+          - MAP@100: 0.1273
+            nDCG@10: 0.3511
+            R@1K: 0.5549
   - name: bm25-d2q-t5-doc-segmented-default
     display: BM25 w/ doc2query-T5 doc segmented (k1=0.9, b=0.4)
     display-html: BM25 w/ doc2query-T5 doc segmented (k1=0.9, b=0.4)
@@ -164,9 +194,15 @@ conditions:
      - topic_key: dl22
        eval_key: dl22-doc
        scores:
-          - MAP@100: 0
-            nDCG@10: 0
-            R@1K: 0
+          - MAP@100: 0.1203
+            nDCG@10: 0.3975
+            R@1K: 0.4984
+      - topic_key: dl23
+        eval_key: dl23-doc
+        scores:
+          - MAP@100: 0.1460
+            nDCG@10: 0.3612
+            R@1K: 0.5967
   - name: bm25-rm3-d2q-t5-doc-default
     display: BM25+RM3 w/ doc2query-T5 doc (k1=0.9, b=0.4)
     display-html: BM25+RM3 w/ doc2query-T5 doc (k1=0.9, b=0.4)
@@ -192,9 +228,15 @@ conditions:
      - topic_key: dl22
        eval_key: dl22-doc
        scores:
-          - MAP@100: 0
-            nDCG@10: 0
-            R@1K: 0
+          - MAP@100: 0.0904
+            nDCG@10: 0.2758
+            R@1K: 0.4263
+      - topic_key: dl23
+        eval_key: dl23-doc
+        scores:
+          - MAP@100: 0.1246
+            nDCG@10: 0.2681
+            R@1K: 0.5616
   - name: bm25-rm3-d2q-t5-doc-segmented-default
     display: BM25+RM3 w/ doc2query-T5 doc segmented (k1=0.9, b=0.4)
     display-html: BM25+RM3 w/ doc2query-T5 doc segmented (k1=0.9, b=0.4)
@@ -220,9 +262,15 @@ conditions:
      - topic_key: dl22
        eval_key: dl22-doc
        scores:
-          - MAP@100: 0
-            nDCG@10: 0
-            R@1K: 0
+          - MAP@100: 0.1319
+            nDCG@10: 0.3912
+            R@1K: 0.5188
+      - topic_key: dl23
+        eval_key: dl23-doc
+        scores:
+          - MAP@100: 0.1699
+            nDCG@10: 0.3454
+            R@1K: 0.6006
   - name: unicoil-noexp
     display: "uniCOIL (noexp): pre-encoded"
     display-html: "uniCOIL (noexp): pre-encoded queries"
@@ -248,32 +296,11 @@ conditions:
      - topic_key: dl22-unicoil-noexp
        eval_key: dl22-doc
        scores:
-          - MAP@100: 0
-            nDCG@10: 0
-            R@1K: 0
-  - name: unicoil-noexp-otf
-    display: "uniCOIL (noexp): query inference with PyTorch"
-    display-html: "uniCOIL (noexp): query inference with PyTorch"
-    command: python -m pyserini.search.lucene --threads ${sparse_threads} --batch-size ${sparse_batch_size} --index msmarco-v2-doc-segmented-unicoil-noexp-0shot --topics $topics --encoder castorini/unicoil-noexp-msmarco-passage --output $output --impact --hits 10000 --max-passage-hits 1000 --max-passage
-    topics:
-      - topic_key: msmarco-v2-doc-dev
-        eval_key: msmarco-v2-doc-dev
-        scores:
-          - MRR@100: 0.2231
-            R@1K: 0.8987
-      - topic_key: msmarco-v2-doc-dev2
-        eval_key: msmarco-v2-doc-dev2
-        scores:
-          - MRR@100: 0.2314
-            R@1K: 0.8995
-      - topic_key: dl21
-        eval_key: dl21-doc
-        scores:
-          - MAP@100: 0.2587
-            nDCG@10: 0.6495
-            R@1K: 0.6787
-      - topic_key: dl22
-        eval_key: dl22-doc
+          - MAP@100: 0.1180
+            nDCG@10: 0.4165
+            R@1K: 0.4779
+      - topic_key: dl23-unicoil-noexp
+        eval_key: dl23-doc
        scores:
          - MAP@100: 0
            nDCG@10: 0
            R@1K: 0
@@ -302,10 +329,49 @@ conditions:
            R@1K: 0.7069
      - topic_key: dl22-unicoil
        eval_key: dl22-doc
+        scores:
+          - MAP@100: 0.1400
+            nDCG@10: 0.4451
+            R@1K: 0.5235
+      - topic_key: dl23-unicoil
+        eval_key: dl23-doc
        scores:
          - MAP@100: 0
            nDCG@10: 0
            R@1K: 0
+  - name: unicoil-noexp-otf
+    display: "uniCOIL (noexp): query inference with PyTorch"
+    display-html: "uniCOIL (noexp): query inference with PyTorch"
+    command: python -m pyserini.search.lucene --threads ${sparse_threads} --batch-size ${sparse_batch_size} --index msmarco-v2-doc-segmented-unicoil-noexp-0shot --topics $topics --encoder castorini/unicoil-noexp-msmarco-passage --output $output --impact --hits 10000 --max-passage-hits 1000 --max-passage
+    topics:
+      - topic_key: msmarco-v2-doc-dev
+        eval_key: msmarco-v2-doc-dev
+        scores:
+          - MRR@100: 0.2231
+            R@1K: 0.8987
+      - topic_key: msmarco-v2-doc-dev2
+        eval_key: msmarco-v2-doc-dev2
+        scores:
+          - MRR@100: 0.2314
+            R@1K: 0.8995
+      - topic_key: dl21
+        eval_key: dl21-doc
+        scores:
+          - MAP@100: 0.2587
+            nDCG@10: 0.6495
+            R@1K: 0.6787
+      - topic_key: dl22
+        eval_key: dl22-doc
+        scores:
+          - MAP@100: 0.1180
+            nDCG@10: 0.4165
+            R@1K: 0.4779
+      - topic_key: dl23
+        eval_key: dl23-doc
+        scores:
+          - MAP@100: 0.1413
+            nDCG@10: 0.3898
+            R@1K: 0.5462
   - name: unicoil-otf
     display: "uniCOIL (w/ doc2query-T5): query inference with PyTorch"
     display-html: "uniCOIL (w/ doc2query-T5): query inference with PyTorch"
@@ -330,6 +396,12 @@ conditions:
      - topic_key: dl22
        eval_key: dl22-doc
        scores:
-          - MAP@100: 0
-            nDCG@10: 0
-            R@1K: 0
+          - MAP@100: 0.1400
+            nDCG@10: 0.4451
+            R@1K: 0.5235
+      - topic_key: dl23
+        eval_key: dl23-doc
+        scores:
+          - MAP@100: 0.1554
+            nDCG@10: 0.4150
+            R@1K: 0.5753
diff --git a/pyserini/2cr/msmarco-v2-passage.yaml b/pyserini/2cr/msmarco-v2-passage.yaml
index 2ec21c231..a76390baf 100644
--- a/pyserini/2cr/msmarco-v2-passage.yaml
+++ b/pyserini/2cr/msmarco-v2-passage.yaml
@@ -27,6 +27,12 @@ conditions:
          - MAP@100: 0.0325
            nDCG@10: 0.2692
            R@1K: 0.3321
+      - topic_key: dl23
+        eval_key: dl23-passage
+        scores:
+          - MAP@100: 0.0793
+            nDCG@10: 0.2627
+            R@1K: 0.4346
   - name: bm25-augmented-default
     display: BM25 augmented passage (k1=0.9, b=0.4)
     display-html: BM25 augmented passage (k1=0.9, b=0.4)
@@ -55,6 +61,12 @@ conditions:
          - MAP@100: 0.0346
            nDCG@10: 0.2742
            R@1K: 0.3666
+      - topic_key: dl23
+        eval_key: dl23-passage
+        scores:
+          - MAP@100: 0.0751
+            nDCG@10: 0.2061
+            R@1K: 0.4514
   - name: bm25-rm3-default
     display: BM25+RM3 original passage (k1=0.9, b=0.4)
     display-html: BM25+RM3 original passage (k1=0.9, b=0.4)
@@ -83,6 +95,12 @@ conditions:
          - MAP@100: 0.0310
            nDCG@10: 0.2686
            R@1K: 0.3559
+      - topic_key: dl23
+        eval_key: dl23-passage
+        scores:
+          - MAP@100: 0.0806
+            nDCG@10: 0.2602
+            R@1K: 0.4748
   - name: bm25-rm3-augmented-default
     display: BM25+RM3 augmented passage (k1=0.9, b=0.4)
     display-html: BM25+RM3 augmented passage (k1=0.9, b=0.4)
@@ -111,6 +129,12 @@ conditions:
          - MAP@100: 0.0318
            nDCG@10: 0.2571
            R@1K: 0.3441
+      - topic_key: dl23
+        eval_key: dl23-passage
+        scores:
+          - MAP@100: 0.0646
+            nDCG@10: 0.1798
+            R@1K: 0.4583
   - name: bm25-d2q-t5-default
     display: BM25 w/ doc2query-T5 original passage (k1=0.9, b=0.4)
     display-html: BM25 w/ doc2query-T5 original passage (k1=0.9, b=0.4)
@@ -139,6 +163,12 @@ conditions:
          - MAP@100: 0.0748
            nDCG@10: 0.3599
            R@1K: 0.4632
+      - topic_key: dl23
+        eval_key: dl23-passage
+        scores:
+          - MAP@100: 0.1085
+            nDCG@10: 0.3156
+            R@1K: 0.5608
   - name: bm25-d2q-t5-augmented-default
     display: BM25 w/ doc2query-T5 augmented passage (k1=0.9, b=0.4)
     display-html: BM25 w/ doc2query-T5 augmented passage (k1=0.9, b=0.4)
@@ -167,6 +197,12 @@ conditions:
          - MAP@100: 0.0735
            nDCG@10: 0.3609
            R@1K: 0.4739
+      - topic_key: dl23
+        eval_key: dl23-passage
+        scores:
+          - MAP@100: 0.1078
+            nDCG@10: 0.2926
+            R@1K: 0.5689
   - name: bm25-rm3-d2q-t5-default
     display: BM25+RM3 w/ doc2query-T5 original passage (k1=0.9, b=0.4)
     display-html: BM25+RM3 w/ doc2query-T5 original passage (k1=0.9, b=0.4)
@@ -195,6 +231,12 @@ conditions:
          - MAP@100: 0.0872
            nDCG@10: 0.3721
            R@1K: 0.4964
+      - topic_key: dl23
+        eval_key: dl23-passage
+        scores:
+          - MAP@100: 0.1135
+            nDCG@10: 0.3132
+            R@1K: 0.5978
   - name: bm25-rm3-d2q-t5-augmented-default
     display: BM25+RM3 w/ doc2query-T5 augmented passage (k1=0.9, b=0.4)
     display-html: BM25+RM3 w/ doc2query-T5 augmented passage (k1=0.9, b=0.4)
@@ -223,6 +265,46 @@ conditions:
          - MAP@100: 0.0821
            nDCG@10: 0.3749
            R@1K: 0.4914
+      - topic_key: dl23
+        eval_key: dl23-passage
+        scores:
+          - MAP@100: 0.1059
+            nDCG@10: 0.2719
+            R@1K: 0.5623
+  - name: unicoil-noexp
+    display: "uniCOIL (noexp): pre-encoded"
+    display-html: "uniCOIL (noexp): pre-encoded queries"
+    display-row: (3a)
+    command: python -m pyserini.search.lucene --threads ${sparse_threads} --batch-size ${sparse_batch_size} --index msmarco-v2-passage-unicoil-noexp-0shot --topics $topics --output $output --hits 1000 --impact
+    topics:
+      - topic_key: msmarco-v2-passage-dev-unicoil-noexp
+        eval_key: msmarco-v2-passage-dev
+        scores:
+          - MRR@100: 0.1342
+            R@1K: 0.7010
+      - topic_key: msmarco-v2-passage-dev2-unicoil-noexp
+        eval_key: msmarco-v2-passage-dev2
+        scores:
+          - MRR@100: 0.1385
+            R@1K: 0.7114
+      - topic_key: dl21-unicoil-noexp
+        eval_key: dl21-passage
+        scores:
+          - MAP@100: 0.2193
+            nDCG@10: 0.5756
+            R@1K: 0.6897
+      - topic_key: dl22-unicoil-noexp
+        eval_key: dl22-passage
+        scores:
+          - MAP@100: 0.0754
+            nDCG@10: 0.4077
+            R@1K: 0.4423
+      - topic_key: dl23-unicoil-noexp
+        eval_key: dl23-passage
+        scores:
+          - MAP@100: 0
+            nDCG@10: 0
+            R@1K: 0
   - name: unicoil
     display: "uniCOIL (w/ doc2query-T5): pre-encoded"
     display-html: "uniCOIL (w/ doc2query-T5): pre-encoded queries"
@@ -251,88 +333,78 @@ conditions:
          - MAP@100: 0.1050
            nDCG@10: 0.4614
            R@1K: 0.5253
-  - name: unicoil-otf
-    display: "uniCOIL (w/ doc2query-T5): query inference with PyTorch"
-    display-html: "uniCOIL (w/ doc2query-T5): query inference with PyTorch"
-    command: python -m pyserini.search.lucene --threads ${sparse_threads} --batch-size ${sparse_batch_size} --index msmarco-v2-passage-unicoil-0shot --topics $topics --encoder castorini/unicoil-msmarco-passage --output $output --hits 1000 --impact
-    topics:
-      - topic_key: msmarco-v2-passage-dev
-        eval_key: msmarco-v2-passage-dev
-        scores:
-          - MRR@100: 0.1499
-            R@1K: 0.7616
-      - topic_key: msmarco-v2-passage-dev2
-        eval_key: msmarco-v2-passage-dev2
+      - topic_key: dl23-unicoil
+        eval_key: dl23-passage
        scores:
-          - MRR@100: 0.1577
-            R@1K: 0.7671
-      - topic_key: dl21
-        eval_key: dl21-passage
-        scores:
-          - MAP@100: 0.2538
-            nDCG@10: 0.6159
-            R@1K: 0.7551
-      - topic_key: dl22
-        eval_key: dl22-passage
-        scores:
-          - MAP@100: 0.1050
-            nDCG@10: 0.4614
-            R@1K: 0.5253
-  - name: unicoil-noexp
-    display: "uniCOIL (noexp): pre-encoded"
-    display-html: "uniCOIL (noexp): pre-encoded queries"
-    display-row: (3a)
-    command: python -m pyserini.search.lucene --threads ${sparse_threads} --batch-size ${sparse_batch_size} --index msmarco-v2-passage-unicoil-noexp-0shot --topics $topics --output $output --hits 1000 --impact
+          - MAP@100: 0
+            nDCG@10: 0
+            R@1K: 0
+  - name: unicoil-noexp-otf
+    display: "uniCOIL (noexp): query inference with PyTorch"
+    display-html: "uniCOIL (noexp): query inference with PyTorch"
+    command: python -m pyserini.search.lucene --threads ${sparse_threads} --batch-size ${sparse_batch_size} --index msmarco-v2-passage-unicoil-noexp-0shot --topics $topics --encoder castorini/unicoil-noexp-msmarco-passage --output $output --hits 1000 --impact
     topics:
-      - topic_key: msmarco-v2-passage-dev-unicoil-noexp
+      - topic_key: msmarco-v2-passage-dev
        eval_key: msmarco-v2-passage-dev
        scores:
          - MRR@100: 0.1342
            R@1K: 0.7010
-      - topic_key: msmarco-v2-passage-dev2-unicoil-noexp
+      - topic_key: msmarco-v2-passage-dev2
        eval_key: msmarco-v2-passage-dev2
        scores:
          - MRR@100: 0.1385
            R@1K: 0.7114
-      - topic_key: dl21-unicoil-noexp
+      - topic_key: dl21
        eval_key: dl21-passage
        scores:
          - MAP@100: 0.2193
            nDCG@10: 0.5756
            R@1K: 0.6897
-      - topic_key: dl22-unicoil-noexp
+      - topic_key: dl22
        eval_key: dl22-passage
        scores:
          - MAP@100: 0.0754
            nDCG@10: 0.4077
            R@1K: 0.4423
+      - topic_key: dl23
+        eval_key: dl23-passage
+        scores:
+          - MAP@100: 0.1112
+            nDCG@10: 0.3262
+            R@1K: 0.5070
+  - name: unicoil-otf
+    display: "uniCOIL (w/ doc2query-T5): query inference with PyTorch"
+    display-html: "uniCOIL (w/ doc2query-T5): query inference with PyTorch"
+    command: python -m pyserini.search.lucene --threads ${sparse_threads} --batch-size ${sparse_batch_size} --index msmarco-v2-passage-unicoil-0shot --topics $topics --encoder castorini/unicoil-msmarco-passage --output $output --hits 1000 --impact
     topics:
      - topic_key: msmarco-v2-passage-dev
        eval_key: msmarco-v2-passage-dev
        scores:
-          - MRR@100: 0.1342
-            R@1K: 0.7010
+          - MRR@100: 0.1499
+            R@1K: 0.7616
      - topic_key: msmarco-v2-passage-dev2
        eval_key: msmarco-v2-passage-dev2
        scores:
-          - MRR@100: 0.1385
-            R@1K: 0.7114
+          - MRR@100: 0.1577
+            R@1K: 0.7671
      - topic_key: dl21
        eval_key: dl21-passage
        scores:
-          - MAP@100: 0.2193
-            nDCG@10: 0.5756
-            R@1K: 0.6897
+          - MAP@100: 0.2538
+            nDCG@10: 0.6159
+            R@1K: 0.7551
      - topic_key: dl22
        eval_key: dl22-passage
        scores:
-          - MAP@100: 0.0754
-            nDCG@10: 0.4077
-            R@1K: 0.4423
+          - MAP@100: 0.1050
+            nDCG@10: 0.4614
+            R@1K: 0.5253
+      - topic_key: dl23
+        eval_key: dl23-passage
+        scores:
+          - MAP@100: 0.1437
+            nDCG@10: 0.3855
+            R@1K: 0.5541
   - name: slimr-pp
     display: "SLIM++ (norefine, tau=0.5, min_idf=1)"
     display-html: "SLIM++ (norefine, tau=0.5, min_idf=1)"
@@ -360,4 +432,10 @@ conditions:
          - MAP@100: 0.1616
            nDCG@10: 0.5017
            R@1K: 0.6159
+      - topic_key: dl23
+        eval_key: dl23-passage
+        scores:
+          - MAP@100: 0.1772
+            nDCG@10: 0.4746
+            R@1K: 0.6412
diff --git a/pyserini/2cr/msmarco.py b/pyserini/2cr/msmarco.py
index 972ce6844..631008d4d 100644
--- a/pyserini/2cr/msmarco.py
+++ b/pyserini/2cr/msmarco.py
@@ -231,6 +231,11 @@
            'MAP@100': '-c -l 2 -M 100 -m map',
            'nDCG@10': '-c -m ndcg_cut.10',
            'R@1K': '-c -l 2 -m recall.1000'
+        },
+        'dl23-passage': {
+            'MAP@100': '-c -l 2 -M 100 -m map',
+            'nDCG@10': '-c -m ndcg_cut.10',
+            'R@1K': '-c -l 2 -m recall.1000'
        }
    },
    'msmarco-v2-doc': {
@@ -251,6 +256,11 @@
            'MAP@100': '-c -M 100 -m map',
            'nDCG@10': '-c -m ndcg_cut.10',
            'R@1K': '-c -m recall.1000'
+        },
+        'dl23-doc': {
+            'MAP@100': '-c -M 100 -m map',
+            'nDCG@10': '-c -m ndcg_cut.10',
+            'R@1K': '-c -m recall.1000'
        }
    }
}
@@ -279,6 +289,8 @@ def find_msmarco_table_topic_set_key_v2(topic_key):
        key = 'dl21'
    elif topic_key.startswith('dl22'):
        key = 'dl22'
+    elif topic_key.startswith('dl23'):
+        key = 'dl23'
    return key
@@ -325,7 +337,7 @@ def list_conditions(args):
            continue
        print(condition)
-def _get_display_num(num: int) -> str:
+def _get_display_num(num: int) -> str:
    return f'{num:.4f}' if num != 0 else '-'
def _remove_commands(table, name, s, v1):
@@ -337,6 +349,7 @@ def _remove_commands(table, name, s, v1):
    v2_unavilable_dict = {
        ('dl21', 'MAP@100'): 'Command to generate run on TREC 2021 queries:.*?',
        ('dl22', 'MAP@100'): 'Command to generate run on TREC 2022 queries:.*?',
+        ('dl23', 'MAP@100'): 'Command to generate run on TREC 2023 queries:.*?',
        ('dev', 'MRR@100'): 'Command to generate run on dev queries:.*?',
        ('dev2', 'MRR@100'): 'Command to generate run on dev2 queries:.*?',
    }
@@ -465,18 +478,23 @@ def generate_report(args):
        s4=_get_display_num(table[name]["dl22"]["MAP@100"]),
        s5=_get_display_num(table[name]["dl22"]["nDCG@10"]),
        s6=_get_display_num(table[name]["dl22"]["R@1K"]),
-        s7=_get_display_num(table[name]["dev"]["MRR@100"]),
-        s8=_get_display_num(table[name]["dev"]["R@1K"]),
-        s9=_get_display_num(table[name]["dev2"]["MRR@100"]),
-        s10=_get_display_num(table[name]["dev2"]["R@1K"]),
+        s7=_get_display_num(table[name]["dl23"]["MAP@100"]),
+        s8=_get_display_num(table[name]["dl23"]["nDCG@10"]),
+        s9=_get_display_num(table[name]["dl23"]["R@1K"]),
+        s10=_get_display_num(table[name]["dev"]["MRR@100"]),
+        s11=_get_display_num(table[name]["dev"]["R@1K"]),
+        s12=_get_display_num(table[name]["dev2"]["MRR@100"]),
+        s13=_get_display_num(table[name]["dev2"]["R@1K"]),
        cmd1=format_command(commands[name]['dl21']),
        cmd2=format_command(commands[name]['dl22']),
-        cmd3=format_command(commands[name]['dev']),
-        cmd4=format_command(commands[name]['dev2']),
+        cmd3=format_command(commands[name]['dl23']),
+        cmd4=format_command(commands[name]['dev']),
+        cmd5=format_command(commands[name]['dev2']),
        eval_cmd1=eval_commands[name]['dl21'],
        eval_cmd2=eval_commands[name]['dl22'],
-        eval_cmd3=eval_commands[name]['dev'],
-        eval_cmd4=eval_commands[name]['dev2']
+        eval_cmd3=eval_commands[name]['dl23'],
+        eval_cmd4=eval_commands[name]['dev'],
+        eval_cmd5=eval_commands[name]['dev2']
    )
    # If we don't have scores, we want to remove the commands also. Use simple regexp substitution.
@@ -606,9 +624,9 @@ def run_conditions(args):
                  f'{table[name]["dl20"]["MAP"]:8.4f}{table[name]["dl20"]["nDCG@10"]:8.4f}{table[name]["dl20"]["R@1K"]:8.4f} ' +
                  f'{table[name]["dev"]["MRR@10"]:8.4f}{table[name]["dev"]["R@1K"]:8.4f}')
        else:
-            print(' ' * 69 + 'TREC 2021' + ' ' * 16 + 'TREC 2022' + ' ' * 12 + 'MS MARCO dev' + ' ' * 5 + 'MS MARCO dev2')
-            print(' ' * 62 + 'MAP nDCG@10 R@1K MAP nDCG@10 R@1K MRR@100 R@1K MRR@100 R@1K')
-            print(' ' * 62 + '-' * 22 + ' ' + '-' * 22 + ' ' + '-' * 14 + ' ' + '-' * 14)
+            print(' ' * 69 + 'TREC 2021' + ' ' * 16 + 'TREC 2022' + ' ' * 16 + 'TREC 2023' + ' ' * 12 + 'MS MARCO dev' + ' ' * 5 + 'MS MARCO dev2')
+            print(' ' * 62 + 'MAP nDCG@10 R@1K MAP nDCG@10 R@1K MAP nDCG@10 R@1K MRR@100 R@1K MRR@100 R@1K')
+            print(' ' * 62 + '-' * 22 + ' ' + '-' * 22 + ' ' + '-' * 22 + ' ' + '-' * 14 + ' ' + '-' * 14)
    if args.condition:
        # If we've used --condition to specify a specific condition, print out only that row.
@@ -624,6 +642,7 @@ def run_conditions(args):
            print(f'{table_keys[name]:60}' +
                  f'{table[name]["dl21"]["MAP@100"]:8.4f}{table[name]["dl21"]["nDCG@10"]:8.4f}{table[name]["dl21"]["R@1K"]:8.4f} ' +
                  f'{table[name]["dl22"]["MAP@100"]:8.4f}{table[name]["dl22"]["nDCG@10"]:8.4f}{table[name]["dl22"]["R@1K"]:8.4f} ' +
+                  f'{table[name]["dl23"]["MAP@100"]:8.4f}{table[name]["dl23"]["nDCG@10"]:8.4f}{table[name]["dl23"]["R@1K"]:8.4f} ' +
                  f'{table[name]["dev"]["MRR@100"]:8.4f}{table[name]["dev"]["R@1K"]:8.4f} ' +
                  f'{table[name]["dev2"]["MRR@100"]:8.4f}{table[name]["dev2"]["R@1K"]:8.4f}')
diff --git a/pyserini/2cr/msmarco_html_row_v2.template b/pyserini/2cr/msmarco_html_row_v2.template
index 001077bfe..4559abf01 100644
--- a/pyserini/2cr/msmarco_html_row_v2.template
+++ b/pyserini/2cr/msmarco_html_row_v2.template
@@ -13,13 +13,17 @@
$s7
$s8
-
$s9
+
$s10
+$s11
+
+$s12
+$s13
-
+
@@ -31,10 +35,13 @@
TREC 2022
+
@@ -68,7 +75,7 @@ Evaluation commands:
-Command to generate run on dev queries:
+Command to generate run on TREC 2023 queries:
$cmd3
@@ -81,7 +88,7 @@ Evaluation commands:
-Command to generate run on dev2 queries:
+Command to generate run on dev queries:
$cmd4
@@ -92,6 +99,19 @@ Evaluation commands:
${eval_cmd4}
+
+Command to generate run on dev2 queries:
+
+$cmd5
+
+Evaluation commands:
+
+${eval_cmd5}
+
diff --git a/pyserini/2cr/msmarco_html_v2_doc.template b/pyserini/2cr/msmarco_html_v2_doc.template
index 1d8d561e0..7d44926a5 100644
--- a/pyserini/2cr/msmarco_html_v2_doc.template
+++ b/pyserini/2cr/msmarco_html_v2_doc.template
@@ -157,6 +157,7 @@ Numbered rows correspond to tables in the paper; additional conditions are provi
TREC 2021
TREC 2022
+TREC 2023
dev
dev2
@@ -172,6 +173,10 @@ Numbered rows correspond to tables in the paper; additional conditions are provi
nDCG@10
R@1K
+
+AP
+nDCG@10
+R@1K
RR@100
R@1K
diff --git a/pyserini/2cr/msmarco_html_v2_passage.template b/pyserini/2cr/msmarco_html_v2_passage.template
index ce4611ccf..46959db1a 100644
--- a/pyserini/2cr/msmarco_html_v2_passage.template
+++ b/pyserini/2cr/msmarco_html_v2_passage.template
@@ -157,6 +157,7 @@ Numbered rows correspond to tables in the paper; additional conditions are provi
TREC 2021
TREC 2022
+TREC 2023
dev
dev2
@@ -172,6 +173,10 @@ Numbered rows correspond to tables in the paper; additional conditions are provi
nDCG@10
R@1K
+
+AP
+nDCG@10
+R@1K
RR@100
R@1K
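
Finally, a hedged sketch of how the YAML conditions and templates above fit together: the 2CR tooling in msmarco.py loads each condition, substitutes the $topics/$output placeholders per topic_key, and fills the s1..s13 slots of the row template. The snippet below imitates that flow for one condition; the working-directory path and the plain string substitution are simplifying assumptions for illustration, not the tooling's actual implementation.

import yaml

# Path assumed from the diff headers above (run from the repo root).
with open('pyserini/2cr/msmarco-v2-passage.yaml') as f:
    conditions = yaml.safe_load(f)['conditions']

cond = next(c for c in conditions if c['name'] == 'bm25-default')
print([t['topic_key'] for t in cond['topics']])  # dl23 is now among the topics

# Fill in the placeholders the way the generated commands above do.
cmd = (cond['command']
       .replace('$topics', 'dl23')
       .replace('$output', 'run.msmarco-v2-passage.bm25-default.dl23.txt'))
print(cmd)
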