diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 10e3ed15b4..0000000000 --- a/.gitignore +++ /dev/null @@ -1,180 +0,0 @@ -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class -*.pretrain* -# C extensions -*.so - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST -license.txt - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. -*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -junit -.tox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -.hypothesis/ -.pytest_cache/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -target/ - -# Locust files: -locustfile.py - -# Jupyter Notebook -.ipynb_checkpoints - -# pyenv -.python-version - -# celery beat schedule file -celerybeat-schedule - -# SageMath parsed files -*.sage.py - -# Environments -.env* -.venv* -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ - -# Tensorflow -*model_checkpoints -**/outputs - -# Azure ML -config.json -aml_config/ -aml_scripts/ -aml_data/ - -# Spark -spark-warehouse/ - -########################## -.DS_Store -.~* -Untitled*.ipynb -*-Copy*.ipynb -~$* -output.ipynb -conda*.yaml -reco_*.yaml -.idea/ -*.npz -*.data -*.dat -*.csv -*.zip -*.7z -.vscode/ -u.item -ml-100k/ -ml-10M100K/ -ml-1m/ -ml-20m/ -*.jar -*.item -*.pkl -*.pdf -.pretrain -*.npy -*.ckpt* -*.png -*.jpg -*.jpeg -*.gif -*.model -*.mml -nohup.out -*.svg -*.html -*.js -*.css -*.tff -*.woff -*.woff2 -*.eot - -##### kdd 2020 tutorial data folder -examples/07_tutorials/KDD2020-tutorial/data_folder/ - -*.vec -*.tsv -*.sh - -tests/**/resources/ -reports/ - -### pip folders -pip-wheel* diff --git a/AUTHORS.md b/AUTHORS.md deleted file mode 100644 index 54664fe0c2..0000000000 --- a/AUTHORS.md +++ /dev/null @@ -1,138 +0,0 @@ - - -Contributors to Recommenders -============================ -Recommenders is developed and maintained by a community of people interested in exploring recommendation algorithms and how best to deploy them in industry settings. The goal is to accelerate the workflow of any individual or organization working on recommender systems. Everyone is encouraged to contribute at any level to add and improve the implemented algorithms, notebooks and utilities. - -

- -

- -Maintainers (sorted alphabetically) ---------------------------------------- -Maintainers are actively supporting the project and have made substantial contributions to the repository.
-They have admin access to the repo and provide support reviewing issues and pull requests. - -* **[Andreas Argyriou](https://github.com/anargyri)** - * SAR single node improvements - * Reco utils metrics computations - * Tests for Surprise - * Model selection notebooks (AzureML for SVD, NNI) -* **[Jianxun Lian](https://github.com/Leavingseason)** - * xDeepFM algorithm - * DKN algorithm - * Review, development and optimization of MSRA algorithms. -* **[Jun Ki Min](https://github.com/loomlike)** - * ALS notebook - * Wide & Deep algorithm - * Hyperparameter tuning notebooks -* **[Miguel González-Fierro](https://github.com/miguelfierro)** - * Recommendation algorithms review, development and optimization. - * Reco utils review, development and optimization. - * Github statistics. - * Continuous integration build / test setup. -* **[Scott Graham](https://github.com/gramhagen)** - * Improving documentation - * VW notebook -* **[Simon Zhao](https://github.com/simonyansenzhao)** - * SARplus algorithm upgrade -* **[Tao Wu](https://github.com/wutaomsft)** - * Improving documentation - - -Contributors (sorted alphabetically) -------------------------------------- -[Full List of Contributors](https://github.com/Microsoft/Recommenders/graphs/contributors) - -To contributors: please add your name to the list when you submit a patch to the project. - -* **[Aaron He](https://github.com/AaronHeee)** - * Reco utils of NCF - * Deep dive notebook demonstrating the use of NCF -* **[Abir Chakraborty](https://github.com/aeroabir)** - * Self-Attentive Sequential Recommendation (SASRec) - * Sequential Recommendation Via Personalized Transformer (SSEPT) -* **[Alexandros Ioannou](https://github.com/aioannou96)** - * Standard VAE algorithm - * Multinomial VAE algorithm -* **[Bamdev Mishra](https://github.com/bamdevm)** - * RLRMC algorithm - * GeoIMC algorithm -* **[Beth Zeranski](https://github.com/bethz)** - * DevOps Pipelines used as a control plane to run existing Pytests on AzureML - * Automation scripts to configure AzureML environment for pipeline use -* **[Chuyang Ke](https://github.com/ChuyangKe)** - * Reco utils optimization - * Performance tests -* **[Dan Bianchini](https://github.com/danb27)** - * SAR Single Node algorithm improvements -* **[Dan Ciborowski](https://github.com/dciborow)** - * ALS operationalization notebook - * SAR PySpark improvement -* **[Daniel Schneider](https://github.com/danielsc)** - * FastAI notebook -* **[Evgenia Chroni](https://github.com/EvgeniaChroni)** - * Multinomial VAE algorithm - * Standard VAE algorithm -* **[Gianluca Campanella](https://github.com/gcampanella)** - * Spark optimization and support -* **[Heather Spetalnick (Shapiro)](https://github.com/heatherbshapiro)** - * AzureML documentation and support -* **[Jeremy Reynolds](https://github.com/jreynolds01)** - * Reference architecture -* **[Jianjie Liu](https://github.com/laserprec/)** - * GitHub Action Migration - * Test Infrastructure Optimization -* **[Kaisar Mussalim](https://github.com/kmussalim)** - * Multinomial VAE algorithm - * Standard VAE algorithm -* **[Le Zhang](https://github.com/yueguoguo)** - * Reco utils - * Continuous integration build / test setup - * Quickstart, deep dive, algorithm comparison, notebooks -* **[Markus Cozowicz](https://github.com/eisber)** - * SAR improvements on Spark -* **[Max Kaznady](https://github.com/maxkazmsft)** - * Early SAR single node code and port from another internal codebase - * Early SAR on Spark-SQL implementation - * SAR notebooks - * SAR unit / integration / 
smoke tests - * Early infrastructure design based on collapsing another internal project -* **[Mirco Milletarì](https://github.com/WessZumino)** - * Restricted Boltzmann Machine algorithm -* **[Nicolas Hug](https://github.com/NicolasHug)** - * Jupyter notebook demonstrating the use of [Surprise](https://github.com/NicolasHug/Surprise) library for recommendations -* **[Nikhil Joglekar](https://github.com/nikhilrj)** - * Improving documentation - * Quick start notebook - * Operationalization notebook -* **[Nile Wilson](https://github.com/niwilso)** - * Term Frequency - Inverse Document Frequency (TF-IDF) quickstart, utils -* **[Pradnyesh Vineet Joshi](https://github.com/pradnyeshjoshi)** - * GitHub workflows to trigger unit, smoke and integration tests in parallel on AzureML - * Scripts to configure AzureML environment -* **[Pratik Jawanpuria](https://github.com/pratikjawanpuria)** - * RLRMC algorithm - * GeoIMC algorithm -* **[Qi Wan](https://github.com/Qcactus)** - * LightGCN algorithm - * Deep dive notebook demonstrating the use of LightGCN -* **[Quoc-Tuan Truong](https://github.com/tqtg)** - * BPR notebook using [Cornac](https://github.com/PreferredAI/cornac) framework - * BiVAE notebook using [Cornac](https://github.com/PreferredAI/cornac) framework -* **[Robert Alexander](https://github.com/roalexan)** - * Windows test pipelines -* **[Satyadev Ntv](https://github.com/satyadevntv)** - * GeoIMC algorithm -* **[Yan Zhang](https://github.com/YanZhangADS)** - * Diversity metrics including coverage, novelty, diversity, and serendipity - * Diversity metrics evaluation sample notebook -* **[Yassine Khelifi](https://github.com/datashinobi)** - * SAR notebook quickstart -* **[Zhenhui Xu](https://github.com/motefly)** - * Reco utils of LightGBM - * LightGBM notebook quickstart - diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md deleted file mode 100644 index 21d30e6887..0000000000 --- a/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,42 +0,0 @@ - - -# Recommenders Code of Conduct - -This code of conduct outlines expectations for participation in the Recommenders open source community, as well as steps for reporting unacceptable behavior. We are committed to providing a welcoming and inspiring community for all. People violating this code of conduct may be banned from the community. Our open source community strives to: - -* **Be friendly and patient**: Remember you might not be communicating in someone else's primary spoken or programming language, and others may not have your level of understanding. - -* **Be welcoming**: Our community welcomes and supports people of all backgrounds and identities. This includes, but is not limited to members of any race, ethnicity, culture, national origin, color, immigration status, social and economic class, educational level, sex, sexual orientation, gender identity and expression, age, size, family status, political belief, religion, and mental and physical ability. - -* **Be respectful**: We are a world-wide community of professionals, and we conduct ourselves professionally. Disagreement is no excuse for poor behavior and poor manners. Disrespectful and unacceptable behavior includes, but is not limited to: - 1. Violent threats or language. - 1. Discriminatory or derogatory jokes and language. - 1. Posting sexually explicit or violent material. - 1. Posting, or threatening to post, people's personally identifying information ("doxing"). - 1. Insults, especially those using discriminatory terms or slurs. - 1. Behavior that could be perceived as sexual attention. 
 - 1. Advocating for or encouraging any of the above behaviors. - -* **Understand disagreements**: Disagreements, both social and technical, are useful learning opportunities. Seek to understand the other viewpoints and resolve differences constructively. - -* **Remember that we’re different**. The strength of our community comes from its diversity: people from a wide range of backgrounds. Different people have different perspectives on issues. Being unable to understand why someone holds a viewpoint doesn’t mean that they’re wrong. Focus on helping to resolve issues and learning from mistakes. - -* This code is not exhaustive or complete. It serves to capture our common understanding of a productive, collaborative environment. We expect the code to be followed in spirit as much as in the letter. - -## Reporting Code of Conduct Issues - -We encourage all communities to resolve issues on their own whenever possible. This builds a broader and deeper understanding and ultimately a healthier interaction. In the event that an issue cannot be resolved locally, please feel free to report your concerns by contacting conduct@lfai.foundation. In your report please include: - -1. Your contact information. -1. Names (real, usernames or pseudonyms) of any individuals involved. If there are additional witnesses, please include them as well. -1. Your account of what occurred, and if you believe the incident is ongoing. If there is a publicly available record (e.g. a mailing list archive or a public chat log), please include a link or attachment. -1. Any additional information that may be helpful. - -All reports will be reviewed by a multi-person team and will result in a response that is deemed necessary and appropriate to the circumstances. Where additional perspectives are needed, the team may seek insight from others with relevant expertise or experience. The confidentiality of the person reporting the incident will be kept at all times. Involved parties are never part of the review team. - -Anyone asked to stop unacceptable behavior is expected to comply immediately. If an individual engages in unacceptable behavior, the review team may take any action they deem appropriate, including a permanent ban from the community. - -*This code of conduct is based on the [template](http://todogroup.org/opencodeofconduct) established by the [TODO Group](http://todogroup.org/) and used by numerous other large communities and the Scope section from the [Contributor Covenant version 1.4](http://contributor-covenant.org/version/1/4/).* diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 4be1443b4d..0000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,75 +0,0 @@ - - -# Contribution Guidelines - -Contributions are welcome! Here are a few things to know: - -- [Contribution Guidelines](#contribution-guidelines) - - [Steps to Contributing](#steps-to-contributing) - - [Coding Guidelines](#coding-guidelines) - - [Microsoft Contributor License Agreement](#microsoft-contributor-license-agreement) - - [Code of Conduct](#code-of-conduct) - - [Do not point fingers](#do-not-point-fingers) - - [Provide code feedback based on evidence](#provide-code-feedback-based-on-evidence) - - [Ask questions do not give answers](#ask-questions-do-not-give-answers) - -## Steps to Contributing - -**TL;DR for contributing: We use the staging branch to land all new features and fixes. 
To make a contribution, please create a branch from staging, make your changes and create a PR to staging.** - -Here are the basic steps to get started with your first contribution (a condensed command-line sketch of these steps appears at the end of this guide). Please reach out with any questions. -1. Use [open issues](https://github.com/Microsoft/Recommenders/issues) to discuss the proposed changes. Create an issue describing changes if necessary to collect feedback. Also, please use provided labels to tag issues so everyone can easily sort issues of interest. -1. [Fork the repo](https://help.github.com/articles/fork-a-repo/) so you can make and test local changes. -1. Create a new branch **from the staging branch** for the issue (please do not create a branch from main). We suggest prefixing the branch with your username followed by a descriptive title (e.g. gramhagen/update_contributing_docs). -1. Install the recommenders package locally, using the optional dependency that matches your test plus the dev option (e.g. for GPU tests: `pip install -e .[gpu,dev]`). -1. Create a test that replicates the issue. -1. Make code changes. -1. Ensure unit tests pass and code style / formatting is consistent (see [wiki](https://github.com/Microsoft/Recommenders/wiki/Coding-Guidelines#python-and-docstrings-style) for more details). -1. When adding code to the repo, make sure you sign the commits, otherwise the tests will fail (see [how to sign the commits](https://github.com/recommenders-team/recommenders/wiki/How-to-sign-commits)). -1. Create a pull request against the **staging** branch. - -Once the features included in a [milestone](https://github.com/microsoft/recommenders/milestones) are completed, we will merge staging into main. See the wiki for more detail about our [merge strategy](https://github.com/microsoft/recommenders/wiki/Strategy-to-merge-the-code-to-main-branch). - -## Coding Guidelines - -We strive to maintain high quality code to make the utilities in the repository easy to understand, use, and extend. We also work hard to maintain a friendly and constructive environment. We've found that having clear expectations on the development process and consistent style helps to ensure everyone can contribute and collaborate effectively. - -Please review the [coding guidelines](https://github.com/recommenders-team/recommenders/wiki/Coding-Guidelines) wiki page to see more details about the expectations for development approach and style. - -Apart from the official [Code of Conduct](CODE_OF_CONDUCT.md), in the Recommenders team we adopt the following behaviors to ensure a great working environment: - -#### Do not point fingers -Let’s be constructive. - -
-Click here to see some examples - -"This method is missing docstrings" instead of "YOU forgot to put docstrings". - -
- -#### Provide code feedback based on evidence - -When making code reviews, try to support your ideas based on evidence (papers, library documentation, stackoverflow, etc) rather than your personal preferences. - -
-Click here to see some examples - -"When reviewing this code, I saw that the Python implementation the metrics are based on classes, however, [scikit-learn](https://scikit-learn.org/stable/modules/classes.html#sklearn-metrics-metrics) and [tensorflow](https://www.tensorflow.org/api_docs/python/tf/metrics) use functions. We should follow the standard in the industry." - -
- -#### Ask questions do not give answers -Try to be empathic. - -
-Click here to see some examples - -* Would it make more sense if ...? -* Have you considered this ... ? - -
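For convenience, here is a condensed command-line sketch of the contribution steps described above. It is an illustrative sketch, not an official script: the fork URL, branch name, extras and test path are placeholders to adapt, and the wiki pages linked above remain the authoritative instructions for commit signing and testing.

```bash
# Fork the repo on GitHub first, then clone your fork (<your-username> is a placeholder).
git clone https://github.com/<your-username>/recommenders.git
cd recommenders

# Always branch from staging, never from main; prefix the branch with your username.
git fetch origin staging
git checkout -b <your-username>/my-descriptive-title origin/staging

# Install the package in editable mode with the dev extra (add gpu or spark as your tests require).
pip install -e .[dev]

# Run the unit tests relevant to your change (the path shown is illustrative).
pytest tests/unit

# Sign your commits as the repo requires (see the signing guide linked above),
# then push and open a pull request against the staging branch.
git commit -s -m "Describe your change"
git push origin <your-username>/my-descriptive-title
```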
 - diff --git a/GLOSSARY.md b/GLOSSARY.md deleted file mode 100644 index 1829c10575..0000000000 --- a/GLOSSARY.md +++ /dev/null @@ -1,65 +0,0 @@ - - -# Glossary - -* **A/B testing**: Methodology to evaluate the performance of a system in production. In the context of Recommendation Systems, it is used to measure a machine learning model's performance in real time. It works by randomly splitting the environment's responses into two groups, A and B; typically half of the traffic goes to the machine learning model's output and the other half is served without the model. By comparing the metrics from the A and B branches, it is possible to evaluate whether using the model is beneficial or not. A test with more than two groups is named a Multi-Variate Test. - -* **Click-through rate (CTR)**: Ratio of the number of users who click on a link over the total number of users that visited the page. CTR is a measure of user engagement. - -* **Cold-start problem**: The cold start problem concerns recommendations for users with little or no past history (new users). Providing recommendations to users with little past history is a difficult problem for collaborative filtering models, because their learning and predictive ability is limited. Much research has been conducted in this direction using content-based filtering models or hybrid models. These models use auxiliary information like user or item metadata to overcome the cold start problem. - -* **Collaborative filtering algorithms (CF)**: CF algorithms predict the likelihood of a user selecting an item based on the behavior of other users [1]. They assume that if user A likes items X and Y, and user B likes item X, user B would probably like item Y. See the [list of CF examples in Recommenders repository](examples/02_model_collaborative_filtering). - -* **Content-based filtering algorithms (CB)**: CB algorithms predict the likelihood of a user selecting an item based on the similarity of users and items among themselves [1]. They assume that if user A lives in country X, has age Y and likes item Z, and user B lives in country X and has age Y, user B would probably like item Z. See the [list of CB examples in Recommenders repository](examples/02_model_content_based_filtering). - -* **Conversion rate**: In the context of e-commerce, the conversion rate is the ratio between the number of conversions (e.g. number of bought items) over the total number of visits. In the context of recommendation systems, conversion rate measures how effective an algorithm is at providing recommendations that the user buys. - -* **Diversity metrics**: In the context of Recommendation Systems, diversity applies to a set of items, and is related to how different the items are with respect to each other [4]. - -* **Explicit interaction data**: When a user explicitly rates an item, typically on a 1-5 scale, the user is assigning a value that expresses how much they like the item. - -* **Hybrid filtering algorithms**: This type of recommendation system can implement a combination of collaborative and content-based filtering models. See the [list of examples in Recommenders repository](examples/02_model_hybrid). - -* **Implicit interaction data**: Implicit interactions are views or clicks that show a certain interest of the user in a specific item. This kind of data is more common, but it doesn't express the intention of the user as clearly as explicit data. 
 - -* **Item information**: This includes information about the item; some examples are name, description, price, etc. - -* **Knowledge graph algorithms**: A knowledge graph algorithm is one that uses knowledge graph data. In comparison with standard algorithms, it allows exploring the graph's latent connections and improving the precision of results; the various relations in the graph can extend users' interests and increase the diversity of recommended items; also, these algorithms bring explainability to recommendation systems [5]. - -* **Knowledge graph data**: A knowledge graph is a directed heterogeneous graph in which nodes correspond to entities (items or item attributes) and edges correspond to relations [5]. - -* **Long tail items**: Typically, the item interaction distribution has the form of a long tail, where items in the tail have a small number of interactions, corresponding to unpopular items, and items in the head have a large number of interactions [1,2]. From the algorithmic point of view, items in the tail suffer from the cold-start problem, making them hard for recommendation systems to use. However, from the business point of view, the items in the tail can be highly profitable: since these items are less popular, businesses can apply a higher margin to them. Recommendation systems that optimize metrics like novelty and diversity can help to find users willing to get these long tail items. - -* **Multi-Variate Test (MVT)**: Methodology to evaluate the performance of a system in production. It is similar to A/B testing, with the difference that instead of having two test groups, MVT has multiple groups. - -* **News information**: This includes information about the news item; some examples are title, body, vertical, etc. - -* **Novelty metrics**: In Recommendation Systems, the novelty of a piece of information generally refers to how different it is with respect to "what has been previously seen" [4]. - -* **Online metrics**: Also named business metrics. They are the metrics computed online that reflect how the Recommendation System is helping the business to improve user engagement or revenue. These metrics include CTR, conversion rate, etc. - -* **Offline metrics**: Metrics computed offline for measuring the performance of the machine learning model. These metrics include ranking, rating, diversity and novelty metrics. - -* **Ranking metrics**: These are used to evaluate how relevant recommendations are for users. They include precision at k, recall at k, nDCG and MAP. See the [list of metrics in Recommenders repository](examples/03_evaluate). - -* **Rating metrics**: These are used to evaluate how accurate a recommender is at predicting ratings that users give to items. They include RMSE, MAE, R squared and explained variance. See the [list of metrics in Recommenders repository](examples/03_evaluate). - -* **Revenue per order**: The revenue per order optimization objective is the default optimization objective for the "Frequently bought together" recommendation model type. This optimization objective cannot be specified for any other recommendation model type. - -* **User information**: This includes all information that defines the user; some examples are name, address, email, demographics, etc. - - -## References and resources - -[1] Aggarwal, Charu C. "Recommender systems". Vol. 1. Cham: Springer International Publishing, 2016. - -[2] Park, Yoon-Joo, and Tuzhilin, Alexander. "The long tail of recommender systems and how to leverage it." 
In Proceedings of the 2008 ACM conference on Recommender systems, pp. 11-18. 2008. [Link to paper](http://people.stern.nyu.edu/atuzhili/pdf/Park-Tuzhilin-RecSys08-final.pdf). - -[3] Armstrong, Robert. "The long tail: Why the future of business is selling less of more." Canadian Journal of Communication 33, no. 1 (2008). [Link to paper](https://www.cjc-online.ca/index.php/journal/article/view/1946/3141). - -[4] Castells, P., Vargas, S., and Wang, Jun. "Novelty and diversity metrics for recommender systems: choice, discovery and relevance." (2011). [Link to paper](https://repositorio.uam.es/bitstream/handle/10486/666094/novelty_castells_DDR_2011.pdf?sequence=1). - -[5] Wang, Hongwei; Zhao, Miao; Xie, Xing; Li, Wenjie and Guo, Minyi. "Knowledge Graph Convolutional Networks for Recommender Systems". The World Wide Web Conference WWW'19. 2019. [Link to paper](https://arxiv.org/abs/1904.12575). diff --git a/LICENSE b/LICENSE deleted file mode 100644 index e74b0d177a..0000000000 --- a/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ - MIT License - - Copyright (c) 2018-present Microsoft Corporation. - Copyright (c) 2023-present Recommenders contributors. - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index 4ee235b2a8..0000000000 --- a/MANIFEST.in +++ /dev/null @@ -1 +0,0 @@ -include recommenders/README.md diff --git a/NEWS.md b/NEWS.md deleted file mode 100644 index d417976c97..0000000000 --- a/NEWS.md +++ /dev/null @@ -1,139 +0,0 @@ - - -# What's New - -## Update October 10, 2023 - -We are pleased to announce that this repository (formerly known as Microsoft Recommenders, https://github.com/microsoft/recommenders) has joined the [Linux Foundation of AI and Data](https://lfaidata.foundation/) (LF AI & Data)! The new organization, `recommenders-team`, reflects this change. - -We hope this move makes it easy for anyone to contribute! Our objective continues to be building an ecosystem and a community to sustain open source innovations and collaborations in recommendation systems. - -## Update August 18, 2023 - -We moved to a new organization! Now to access the repo, instead of going to https://github.com/microsoft/recommenders, you need to go to https://github.com/recommenders-team/recommenders. The old URL will still resolve to the new one, but we recommend that you update your bookmarks. - -## Update February 7, 2023 - -We reached 15,000 stars!! 
 - -## Update July 20, 2022 - -We have a new release [Recommenders 1.1.1](https://github.com/microsoft/recommenders/releases/tag/1.1.1)! - -We have introduced a new way of testing our repository using [AzureML](https://azure.microsoft.com/en-us/services/machine-learning/). With AzureML we are able to distribute our tests to different machines and run them in parallel. This allows us to test our repository on a wider range of machines and provides us with a much faster test cycle. Our total computation time went from around 9h to 35min, and we were able to reduce the costs by half. See more details [here](tests/README.md). - -We also made other improvements, like faster evaluation metrics and improvements to the SAR algorithm. - -## Update April 1, 2022 - -We have a new release [Recommenders 1.1.0](https://github.com/microsoft/recommenders/releases/tag/1.1.0)! -We have introduced the SASRec and SSEPT algorithms, which are based on transformers. -In addition, we have now enabled Python 3.8 and 3.9. -We have also made improvements to the SARPlus algorithm, including support for Azure Synapse and Spark 3.2. -There are also bug fixes and improvements to NCF, RBM, LightGBM, LightFM, Scikit-Surprise, the stratified splitter and the dockerfile, -as well as an upgrade to Scikit-Learn 1.0.2. - -## Update January 13, 2022 - -We have a new release [Recommenders 1.0.0](https://github.com/microsoft/recommenders/releases/tag/1.0.0)! The codebase has now migrated to TensorFlow versions 2.6 / 2.7 and to Spark version 3. In addition, there are a few changes in the dependencies and extras installed by `pip` (see [this guide](recommenders/README.md#optional-dependencies)). We have also made improvements in the code and the CI / CD pipelines. - -## Update September 27, 2021 - -We have a new release [Recommenders 0.7.0](https://github.com/microsoft/recommenders/releases/tag/0.7.0)! - -In this release, we have changed the names of the folders which contain the source code, so that they are more informative. This implies that you will need to change any import statements that reference the recommenders package. Specifically, the folder `reco_utils` has been renamed to `recommenders` and its subfolders have been renamed according to [issue 1390](https://github.com/microsoft/recommenders/issues/1390). - -The recommenders package now supports three types of environments: [venv](https://docs.python.org/3/library/venv.html), [virtualenv](https://virtualenv.pypa.io/en/latest/index.html#) and [conda](https://docs.conda.io/projects/conda/en/latest/glossary.html?highlight=environment#conda-environment) with Python versions 3.6 and 3.7. - -We have also added new evaluation metrics: _novelty, serendipity, diversity and coverage_ (see the [evaluation notebooks](examples/03_evaluate/README.md)). - -Code coverage reports are now generated for every PR, using [Codecov](https://about.codecov.io/). - -## Update June 21, 2021 - -We have a new release [Recommenders 0.6.0](https://github.com/microsoft/recommenders/releases/tag/0.6.0)! - -Recommenders is now on PyPI and can be installed using pip! In addition, there are lots of bug fixes and utilities improvements. - -Here you can find the PyPI page: https://pypi.org/project/recommenders/ - -Here you can find the package documentation: https://microsoft-recommenders.readthedocs.io/en/latest/ - -## Update June 1, 2021 - -We have surpassed 10k stars! - -The Microsoft Recommenders repository has reached 10k stars and has become the most starred open-source recommender system project on GitHub. 
 - -Many thanks and congratulations to all the contributors to this repository! More advanced algorithms and best practices are yet to come! - -## Update February 4, 2021 - -We have a new release [Recommenders 0.5.0](https://github.com/microsoft/recommenders/releases/tag/0.5.0)! - -It comes with lots of bug fixes, optimizations and 3 new algorithms: GeoIMC, Standard VAE and Multinomial VAE. We also added tools to facilitate the use of the Microsoft News dataset (MIND). In addition, we published our KDD2020 tutorial where we built a recommender of COVID papers using the Microsoft Academic Graph. - -We also changed the default branch from master to main. Now when you download the repo, you will get the main branch. - -## Update October 19, 2020 - -Leaderboard Reopened! - -[Microsoft News Recommendation Competition Winners Announced](https://msnews.github.io/competition.html) - -Congratulations to all participants and [winners](https://msnews.github.io/competition.html#winner) of the Microsoft News Recommendation Competition! In the last two months, over 200 participants from more than 90 institutions in 19 countries and regions joined the competition and collectively advanced the state of the art of news recommendation. - -The competition is based on the recently released [MIND dataset](https://msnews.github.io/), an open, large-scale English news dataset with impression logs. Details of the dataset are available in this [ACL paper](https://msnews.github.io/assets/doc/ACL2020_MIND.pdf). - -With the competition successfully closed, the [leaderboard](https://msnews.github.io/competition.html#leaderboard) is now reopened. Want to see if you can grab the top spot? Get familiar with the [news recommendation scenario](https://github.com/microsoft/recommenders/tree/main/scenarios/news). Then dive into some baselines such as [DKN](examples/00_quick_start/dkn_MIND.ipynb), [LSTUR](examples/00_quick_start/lstur_MIND.ipynb), [NAML](examples/00_quick_start/naml_MIND.ipynb), [NPA](examples/00_quick_start/npa_MIND.ipynb) and [NRMS](examples/00_quick_start/nrms_MIND.ipynb) and start hacking! - -## Update October 5, 2020 - -[Microsoft News Recommendation Competition Winners Announced, Leaderboard to Reopen!](https://msnews.github.io/competition.html) - -Congratulations to all participants and [winners](https://msnews.github.io/competition.html#winner) of the Microsoft News Recommendation Competition! In the last two months, over 200 participants from more than 90 institutions in 19 countries and regions joined the competition and collectively advanced the state of the art of news recommendation. - -The competition is based on the recently released [MIND dataset](https://msnews.github.io/), an open, large-scale English news dataset with impression logs. Details of the dataset are available in this [ACL paper](https://msnews.github.io/assets/doc/ACL2020_MIND.pdf). - -With the competition successfully closed, the [leaderboard](https://msnews.github.io/competition.html#leaderboard) will reopen soon. Want to see if you can grab the top spot? Get familiar with the [news recommendation scenario](https://github.com/microsoft/recommenders/tree/main/scenarios/news). Then dive into some baselines such as [DKN](examples/00_quick_start/dkn_MIND.ipynb), [LSTUR](examples/00_quick_start/lstur_MIND.ipynb), [NAML](examples/00_quick_start/naml_MIND.ipynb), [NPA](examples/00_quick_start/npa_MIND.ipynb) and [NRMS](examples/00_quick_start/nrms_MIND.ipynb) and get ready! 
 - -## Update August 20, 2020 - -New release: [Recommenders 0.4.0](https://github.com/microsoft/recommenders/releases/tag/0.4.0) - -13 new algorithms and multiple fixes and new features - -## Update July 20, 2020 - -Microsoft is hosting a News Recommendation competition based on the [MIND dataset](https://msnews.github.io/), a large-scale English news dataset with impression logs. Check out the [ACL paper](https://msnews.github.io/assets/doc/ACL2020_MIND.pdf), get familiar with the [news recommendation scenario](https://github.com/microsoft/recommenders/tree/main/scenarios/news), and dive into the [quick start example](examples/00_quick_start/dkn_MIND.ipynb) using the DKN algorithm. Then try some other algorithms (NAML, NPA, NRMS, LSTUR) and tools in recommenders and submit your entry! - -## Update September 18, 2019 - -New release: [Recommenders 0.3.1](https://github.com/microsoft/recommenders/releases/tag/0.3.1) - -## Update September 15, 2019 - -We reached 5000 stars!! - -## Update June 3, 2019 - -New release: [Recommenders 0.3.0](https://github.com/microsoft/recommenders/releases/tag/0.3.0) - -## Update February 20, 2019 - -New release: [Recommenders 0.2.0](https://github.com/microsoft/recommenders/releases/tag/0.2.0) - -## Update February 11, 2019 - -We reached 1000 stars!! - -## Update December 12, 2018 - -First release: [Recommenders 0.1.1](https://github.com/microsoft/recommenders/releases/tag/0.1.1) - -## Update November 12, 2018 - -First pre-release: [Recommenders 0.1.0](https://github.com/microsoft/recommenders/releases/tag/0.1.0) diff --git a/README.md b/README.md deleted file mode 100644 index 01d8f0edf7..0000000000 --- a/README.md +++ /dev/null @@ -1,164 +0,0 @@ - - -# Recommenders - -[![Documentation Status](https://readthedocs.org/projects/microsoft-recommenders/badge/?version=latest)](https://microsoft-recommenders.readthedocs.io/en/latest/?badge=latest) - - - -## What's New (October 2023) - -We are pleased to announce that this repository (formerly known as Microsoft Recommenders, https://github.com/microsoft/recommenders) has joined the [Linux Foundation of AI and Data](https://lfaidata.foundation/) (LF AI & Data)! The new organization, `recommenders-team`, reflects this change. - -We hope this move makes it easy for anyone to contribute! Our objective continues to be building an ecosystem and a community to sustain open source innovations and collaborations in recommendation systems. - -Now to access the repo, instead of going to https://github.com/microsoft/recommenders, you need to go to https://github.com/recommenders-team/recommenders. The old URL will still resolve to the new one, but we recommend that you update your bookmarks. - -## Introduction - -The objective of Recommenders is to assist researchers, developers and enthusiasts in prototyping, experimenting with and bringing to production a range of classic and state-of-the-art recommendation systems. - -Recommenders is a project under the [Linux Foundation of AI and Data](https://lfaidata.foundation/projects/). - -This repository contains examples and best practices for building recommendation systems, provided as Jupyter notebooks. The examples detail our learnings on five key tasks: - -- [Prepare Data](examples/01_prepare_data): Preparing and loading data for each recommendation algorithm. 
-- [Model](examples/00_quick_start): Building models using various classical and deep learning recommendation algorithms such as Alternating Least Squares ([ALS](https://spark.apache.org/docs/latest/api/python/_modules/pyspark/ml/recommendation.html#ALS)) or eXtreme Deep Factorization Machines ([xDeepFM](https://arxiv.org/abs/1803.05170)). -- [Evaluate](examples/03_evaluate): Evaluating algorithms with offline metrics. -- [Model Select and Optimize](examples/04_model_select_and_optimize): Tuning and optimizing hyperparameters for recommendation models. -- [Operationalize](examples/05_operationalize): Operationalizing models in a production environment on Azure. - -Several utilities are provided in [recommenders](recommenders) to support common tasks such as loading datasets in the format expected by different algorithms, evaluating model outputs, and splitting training/test data. Implementations of several state-of-the-art algorithms are included for self-study and customization in your own applications. See the [Recommenders documentation](https://readthedocs.org/projects/microsoft-recommenders/). - -For a more detailed overview of the repository, please see the documents on the [wiki page](https://github.com/microsoft/recommenders/wiki/Documents-and-Presentations). - -## Getting Started - -We recommend [conda](https://docs.conda.io/projects/conda/en/latest/glossary.html?highlight=environment#conda-environment) for environment management, and [VS Code](https://code.visualstudio.com/) for development. To install the recommenders package and run an example notebook on Linux/WSL: - -```bash -# 1. Install gcc if it is not installed already. On Ubuntu, this could be done by using the command -# sudo apt install gcc - -# 2. Create and activate a new conda environment -conda create -n <environment_name> python=3.9 -conda activate <environment_name> - -# 3. Install the core recommenders package. It can run all the CPU notebooks. -pip install recommenders - -# 4. Create a Jupyter kernel -python -m ipykernel install --user --name <environment_name> --display-name <kernel_name> - -# 5. Clone this repo within VSCode or using command line: -git clone https://github.com/recommenders-team/recommenders.git - -# 6. Within VSCode: -# a. Open a notebook, e.g., examples/00_quick_start/sar_movielens.ipynb; -# b. Select the Jupyter kernel <kernel_name>; -# c. Run the notebook. -``` - -For more information about setup on other platforms (e.g., Windows and macOS) and different configurations (e.g., GPU, Spark and experimental features), see the [Setup Guide](SETUP.md). - -In addition to the core package, several extras are also provided, including: -+ `[gpu]`: Needed for running GPU models. -+ `[spark]`: Needed for running Spark models. -+ `[dev]`: Needed for development on the repo. -+ `[all]`: `[gpu]`|`[spark]`|`[dev]` -+ `[experimental]`: Models that are not thoroughly tested and/or may require additional steps in installation. - -## Algorithms - -The table below lists the recommendation algorithms currently available in the repository. Notebooks are linked under the Example column as Quick start, showcasing an easy-to-run example of the algorithm, or as Deep dive, explaining in detail the math and implementation of the algorithm. - -| Algorithm | Type | Description | Example | -|-----------|------|-------------|---------| -| Alternating Least Squares (ALS) | Collaborative Filtering | Matrix factorization algorithm for explicit or implicit feedback in large datasets, optimized for scalability and distributed computing capability. It works in the PySpark environment. 
| [Quick start](examples/00_quick_start/als_movielens.ipynb) / [Deep dive](examples/02_model_collaborative_filtering/als_deep_dive.ipynb) | -| Attentive Asynchronous Singular Value Decomposition (A2SVD)* | Collaborative Filtering | Sequential-based algorithm that aims to capture both long and short-term user preferences using an attention mechanism. It works in the CPU/GPU environment. | [Quick start](examples/00_quick_start/sequential_recsys_amazondataset.ipynb) | -| Cornac/Bayesian Personalized Ranking (BPR) | Collaborative Filtering | Matrix factorization algorithm for predicting item ranking with implicit feedback. It works in the CPU environment. | [Deep dive](examples/02_model_collaborative_filtering/cornac_bpr_deep_dive.ipynb) | -| Cornac/Bilateral Variational Autoencoder (BiVAE) | Collaborative Filtering | Generative model for dyadic data (e.g., user-item interactions). It works in the CPU/GPU environment. | [Deep dive](examples/02_model_collaborative_filtering/cornac_bivae_deep_dive.ipynb) | -| Convolutional Sequence Embedding Recommendation (Caser) | Collaborative Filtering | Algorithm based on convolutions that aims to capture both users' general preferences and sequential patterns. It works in the CPU/GPU environment. | [Quick start](examples/00_quick_start/sequential_recsys_amazondataset.ipynb) | -| Deep Knowledge-Aware Network (DKN)* | Content-Based Filtering | Deep learning algorithm incorporating a knowledge graph and article embeddings for providing news or article recommendations. It works in the CPU/GPU environment. | [Quick start](examples/00_quick_start/dkn_MIND.ipynb) / [Deep dive](examples/02_model_content_based_filtering/dkn_deep_dive.ipynb) | -| Extreme Deep Factorization Machine (xDeepFM)* | Hybrid | Deep learning based algorithm for implicit and explicit feedback with user/item features. It works in the CPU/GPU environment. | [Quick start](examples/00_quick_start/xdeepfm_criteo.ipynb) | -| FastAI Embedding Dot Bias (FAST) | Collaborative Filtering | General purpose algorithm with embeddings and biases for users and items. It works in the CPU/GPU environment. | [Quick start](examples/00_quick_start/fastai_movielens.ipynb) | -| LightFM/Hybrid Matrix Factorization | Hybrid | Hybrid matrix factorization algorithm for both implicit and explicit feedback. It works in the CPU environment. | [Quick start](examples/02_model_hybrid/lightfm_deep_dive.ipynb) | -| LightGBM/Gradient Boosting Tree* | Content-Based Filtering | Gradient Boosting Tree algorithm for fast training and low memory usage in content-based problems. It works in the CPU/GPU/PySpark environments. | [Quick start in CPU](examples/00_quick_start/lightgbm_tinycriteo.ipynb) / [Deep dive in PySpark](examples/02_model_content_based_filtering/mmlspark_lightgbm_criteo.ipynb) | -| LightGCN | Collaborative Filtering | Deep learning algorithm which simplifies the design of GCN for predicting implicit feedback. It works in the CPU/GPU environment. | [Deep dive](examples/02_model_collaborative_filtering/lightgcn_deep_dive.ipynb) | -| GeoIMC* | Hybrid | Matrix completion algorithm that takes into account user and item features using Riemannian conjugate gradients optimization and following a geometric approach. It works in the CPU environment. | [Quick start](examples/00_quick_start/geoimc_movielens.ipynb) | -| GRU | Collaborative Filtering | Sequential-based algorithm that aims to capture both long and short-term user preferences using recurrent neural networks. It works in the CPU/GPU environment. 
| [Quick start](examples/00_quick_start/sequential_recsys_amazondataset.ipynb) | -| Multinomial VAE | Collaborative Filtering | Generative model for predicting user/item interactions. It works in the CPU/GPU environment. | [Deep dive](examples/02_model_collaborative_filtering/multi_vae_deep_dive.ipynb) | -| Neural Recommendation with Long- and Short-term User Representations (LSTUR)* | Content-Based Filtering | Neural recommendation algorithm for recommending news articles with long- and short-term user interest modeling. It works in the CPU/GPU environment. | [Quick start](examples/00_quick_start/lstur_MIND.ipynb) | -| Neural Recommendation with Attentive Multi-View Learning (NAML)* | Content-Based Filtering | Neural recommendation algorithm for recommending news articles with attentive multi-view learning. It works in the CPU/GPU environment. | [Quick start](examples/00_quick_start/naml_MIND.ipynb) | -| Neural Collaborative Filtering (NCF) | Collaborative Filtering | Deep learning algorithm with enhanced performance for user/item implicit feedback. It works in the CPU/GPU environment.| [Quick start](examples/00_quick_start/ncf_movielens.ipynb) / [Deep dive](examples/02_model_collaborative_filtering/ncf_deep_dive.ipynb) | -| Neural Recommendation with Personalized Attention (NPA)* | Content-Based Filtering | Neural recommendation algorithm for recommending news articles with personalized attention network. It works in the CPU/GPU environment. | [Quick start](examples/00_quick_start/npa_MIND.ipynb) | -| Neural Recommendation with Multi-Head Self-Attention (NRMS)* | Content-Based Filtering | Neural recommendation algorithm for recommending news articles with multi-head self-attention. It works in the CPU/GPU environment. | [Quick start](examples/00_quick_start/nrms_MIND.ipynb) | -| Next Item Recommendation (NextItNet) | Collaborative Filtering | Algorithm based on dilated convolutions and residual network that aims to capture sequential patterns. It considers both user/item interactions and features. It works in the CPU/GPU environment. | [Quick start](examples/00_quick_start/sequential_recsys_amazondataset.ipynb) | -| Restricted Boltzmann Machines (RBM) | Collaborative Filtering | Neural network based algorithm for learning the underlying probability distribution for explicit or implicit user/item feedback. It works in the CPU/GPU environment. | [Quick start](examples/00_quick_start/rbm_movielens.ipynb) / [Deep dive](examples/02_model_collaborative_filtering/rbm_deep_dive.ipynb) | -| Riemannian Low-rank Matrix Completion (RLRMC)* | Collaborative Filtering | Matrix factorization algorithm using Riemannian conjugate gradients optimization with small memory consumption to predict user/item interactions. It works in the CPU environment. | [Quick start](examples/00_quick_start/rlrmc_movielens.ipynb) | -| Simple Algorithm for Recommendation (SAR)* | Collaborative Filtering | Similarity-based algorithm for implicit user/item feedback. It works in the CPU environment. | [Quick start](examples/00_quick_start/sar_movielens.ipynb) / [Deep dive](examples/02_model_collaborative_filtering/sar_deep_dive.ipynb) | -| Self-Attentive Sequential Recommendation (SASRec) | Collaborative Filtering | Transformer based algorithm for sequential recommendation. It works in the CPU/GPU environment. 
| [Quick start](examples/00_quick_start/sasrec_amazon.ipynb) | -| Short-term and Long-term Preference Integrated Recommender (SLi-Rec)* | Collaborative Filtering | Sequential-based algorithm that aims to capture both long and short-term user preferences using attention mechanism, a time-aware controller and a content-aware controller. It works in the CPU/GPU environment. | [Quick start](examples/00_quick_start/sequential_recsys_amazondataset.ipynb) | -| Multi-Interest-Aware Sequential User Modeling (SUM)* | Collaborative Filtering | An enhanced memory network-based sequential user model which aims to capture users' multiple interests. It works in the CPU/GPU environment. | [Quick start](examples/00_quick_start/sequential_recsys_amazondataset.ipynb) | -| Sequential Recommendation Via Personalized Transformer (SSEPT) | Collaborative Filtering | Transformer based algorithm for sequential recommendation with User embedding. It works in the CPU/GPU environment. | [Quick start](examples/00_quick_start/sasrec_amazon.ipynb) | -| Standard VAE | Collaborative Filtering | Generative Model for predicting user/item interactions. It works in the CPU/GPU environment. | [Deep dive](examples/02_model_collaborative_filtering/standard_vae_deep_dive.ipynb) | -| Surprise/Singular Value Decomposition (SVD) | Collaborative Filtering | Matrix factorization algorithm for predicting explicit rating feedback in small datasets. It works in the CPU/GPU environment. | [Deep dive](examples/02_model_collaborative_filtering/surprise_svd_deep_dive.ipynb) | -| Term Frequency - Inverse Document Frequency (TF-IDF) | Content-Based Filtering | Simple similarity-based algorithm for content-based recommendations with text datasets. It works in the CPU environment. | [Quick start](examples/00_quick_start/tfidf_covid.ipynb) | -| Vowpal Wabbit (VW)* | Content-Based Filtering | Fast online learning algorithms, great for scenarios where user features / context are constantly changing. It uses the CPU for online learning. | [Deep dive](examples/02_model_content_based_filtering/vowpal_wabbit_deep_dive.ipynb) | -| Wide and Deep | Hybrid | Deep learning algorithm that can memorize feature interactions and generalize user features. It works in the CPU/GPU environment. | [Quick start](examples/00_quick_start/wide_deep_movielens.ipynb) | -| xLearn/Factorization Machine (FM) & Field-Aware FM (FFM) | Hybrid | Quick and memory efficient algorithm to predict labels with user/item features. It works in the CPU/GPU environment. | [Deep dive](examples/02_model_hybrid/fm_deep_dive.ipynb) | - -**NOTE**: * indicates algorithms invented/contributed by Microsoft. - -Independent or incubating algorithms and utilities are candidates for the [contrib](contrib) folder. This will house contributions which may not easily fit into the core repository or need time to refactor or mature the code and add necessary tests. - -| Algorithm | Type | Description | Example | -|-----------|------|-------------|---------| -| SARplus * | Collaborative Filtering | Optimized implementation of SAR for Spark | [Quick start](contrib/sarplus/README.md) | - -### Algorithm Comparison - -We provide a [benchmark notebook](examples/06_benchmarks/movielens.ipynb) to illustrate how different algorithms could be evaluated and compared. In this notebook, the MovieLens dataset is split into training/test sets at a 75/25 ratio using a stratified split. A recommendation model is trained using each of the collaborative filtering algorithms below. 
We utilize empirical parameter values reported in the literature [here](http://mymedialite.net/examples/datasets.html). For ranking metrics we use `k=10` (top 10 recommended items). We run the comparison on a Standard NC6s_v2 [Azure DSVM](https://azure.microsoft.com/en-us/services/virtual-machines/data-science-virtual-machines/) (6 vCPUs, 112 GB memory and 1 P100 GPU). Spark ALS is run in local standalone mode. In this table we show the results on Movielens 100k, running the algorithms for 15 epochs. - -| Algo | MAP | nDCG@k | Precision@k | Recall@k | RMSE | MAE | R2 | Explained Variance | -| --- | --- | --- | --- | --- | --- | --- | --- | --- | -| [ALS](examples/00_quick_start/als_movielens.ipynb) | 0.004732 | 0.044239 | 0.048462 | 0.017796 | 0.965038 | 0.753001 | 0.255647 | 0.251648 | -| [BiVAE](examples/02_model_collaborative_filtering/cornac_bivae_deep_dive.ipynb) | 0.146126 | 0.475077 | 0.411771 | 0.219145 | N/A | N/A | N/A | N/A | -| [BPR](examples/02_model_collaborative_filtering/cornac_bpr_deep_dive.ipynb) | 0.132478 | 0.441997 | 0.388229 | 0.212522 | N/A | N/A | N/A | N/A | -| [FastAI](examples/00_quick_start/fastai_movielens.ipynb) | 0.025503 | 0.147866 | 0.130329 | 0.053824 | 0.943084 | 0.744337 | 0.285308 | 0.287671 | -| [LightGCN](examples/02_model_collaborative_filtering/lightgcn_deep_dive.ipynb) | 0.088526 | 0.419846 | 0.379626 | 0.144336 | N/A | N/A | N/A | N/A | -| [NCF](examples/02_model_hybrid/ncf_deep_dive.ipynb) | 0.107720 | 0.396118 | 0.347296 | 0.180775 | N/A | N/A | N/A | N/A | -| [SAR](examples/00_quick_start/sar_movielens.ipynb) | 0.110591 | 0.382461 | 0.330753 | 0.176385 | 1.253805 | 1.048484 | -0.569363 | 0.030474 | -| [SVD](examples/02_model_collaborative_filtering/surprise_svd_deep_dive.ipynb) | 0.012873 | 0.095930 | 0.091198 | 0.032783 | 0.938681 | 0.742690 | 0.291967 | 0.291971 | - -## Contributing - -This project welcomes contributions and suggestions. Before contributing, please see our [contribution guidelines](CONTRIBUTING.md). - -This project adheres to [Microsoft's Open Source Code of Conduct](CODE_OF_CONDUCT.md) in order to foster a welcoming and inspiring community for all. - -## Build Status - -The builds below are the nightly builds, which run the asynchronous tests. `main` is our principal branch and `staging` is our development branch. We use [pytest](https://docs.pytest.org/) for testing python utilities in [recommenders](recommenders) and the Recommenders [notebook executor](recommenders/utils/notebook_utils.py) for the [notebooks](examples). - -For more information about the testing pipelines, please see the [test documentation](tests/README.md). - -### AzureML Nightly Build Status - -The nightly build tests are run daily on AzureML. 
- -| Build Type | Branch | Status | | Branch | Status | -| --- | --- | --- | --- | --- | --- | -| **Linux CPU** | main | [![azureml-cpu-nightly](https://github.com/microsoft/recommenders/actions/workflows/azureml-cpu-nightly.yml/badge.svg?branch=main)](https://github.com/microsoft/recommenders/actions/workflows/azureml-cpu-nightly.yml?query=branch%3Amain) | | staging | [![azureml-cpu-nightly](https://github.com/microsoft/recommenders/actions/workflows/azureml-cpu-nightly.yml/badge.svg?branch=staging)](https://github.com/microsoft/recommenders/actions/workflows/azureml-cpu-nightly.yml?query=branch%3Astaging) | -| **Linux GPU** | main | [![azureml-gpu-nightly](https://github.com/microsoft/recommenders/actions/workflows/azureml-gpu-nightly.yml/badge.svg?branch=main)](https://github.com/microsoft/recommenders/actions/workflows/azureml-gpu-nightly.yml?query=branch%3Amain) | | staging | [![azureml-gpu-nightly](https://github.com/microsoft/recommenders/actions/workflows/azureml-gpu-nightly.yml/badge.svg?branch=staging)](https://github.com/microsoft/recommenders/actions/workflows/azureml-gpu-nightly.yml?query=branch%3Astaging) | -| **Linux Spark** | main | [![azureml-spark-nightly](https://github.com/microsoft/recommenders/actions/workflows/azureml-spark-nightly.yml/badge.svg?branch=main)](https://github.com/microsoft/recommenders/actions/workflows/azureml-spark-nightly.yml?query=branch%3Amain) | | staging | [![azureml-spark-nightly](https://github.com/microsoft/recommenders/actions/workflows/azureml-spark-nightly.yml/badge.svg?branch=staging)](https://github.com/microsoft/recommenders/actions/workflows/azureml-spark-nightly.yml?query=branch%3Astaging) | - -## References - -- D. Li, J. Lian, L. Zhang, K. Ren, D. Lu, T. Wu, X. Xie, "Recommender Systems: Frontiers and Practices" (in Chinese), Publishing House of Electronics Industry, Beijing 2022. -- A. Argyriou, M. González-Fierro, and L. Zhang, "Microsoft Recommenders: Best Practices for Production-Ready Recommendation Systems", *WWW 2020: International World Wide Web Conference Taipei*, 2020. Available online: https://dl.acm.org/doi/abs/10.1145/3366424.3382692 -- L. Zhang, T. Wu, X. Xie, A. Argyriou, M. González-Fierro and J. Lian, "Building Production-Ready Recommendation System at Scale", *ACM SIGKDD Conference on Knowledge Discovery and Data Mining 2019 (KDD 2019)*, 2019. -- S. Graham, J.K. Min, T. Wu, "Microsoft recommenders: tools to accelerate developing recommender systems", *RecSys '19: Proceedings of the 13th ACM Conference on Recommender Systems*, 2019. Available online: https://dl.acm.org/doi/10.1145/3298689.3346967 diff --git a/SECURITY.md b/SECURITY.md deleted file mode 100644 index 0756261078..0000000000 --- a/SECURITY.md +++ /dev/null @@ -1,18 +0,0 @@ - - -# Security Policy - -## Reporting a Vulnerability -If you think you have found a security vulnerability, please send a report to recommenders-security@lists.lfaidata.foundation. - -We don't currently have a PGP key, unfortunately. - -A Recommenders committer will send you a response indicating the next steps in handling your report. After the initial reply to your report, the committer will keep you informed of the progress towards a fix and full announcement, and may ask for additional information or guidance. - -Important: Please don't disclose the vulnerability before it has been fixed and announced, to protect our users. 
## Security announcements

Please subscribe to the [announcements mailing list](https://lists.lfaidata.foundation/g/recommenders-announce), where we post notifications and remediation details for security vulnerabilities.
\ No newline at end of file
diff --git a/SETUP.md b/SETUP.md deleted file mode 100644 index f06995e6e5..0000000000 --- a/SETUP.md +++ /dev/null @@ -1,169 +0,0 @@

# Setup Guide

The repo, including this guide, is tested on Linux. Where applicable, we document differences for [Windows](#windows-specific-instructions) and [MacOS](#macos-specific-instructions), although such documentation may not always be up to date.

## Extras

In addition to the pip installable package, several extras are provided, including:
+ `[gpu]`: Needed for running GPU models.
+ `[spark]`: Needed for running Spark models.
+ `[dev]`: Needed for development.
+ `[all]`: `[gpu]`|`[spark]`|`[dev]`
+ `[experimental]`: Models that are not thoroughly tested and/or may require additional installation steps.

## Setup for Core Package

Follow the [Getting Started](./README.md#Getting-Started) section in the [README](./README.md) to install the package and run the examples.

## Setup for GPU

```bash
# 1. Make sure CUDA is installed.

# 2. Follow Steps 1-5 in the Getting Started section in README.md to install the package and Jupyter kernel, adding the gpu extra to the pip install command:
pip install recommenders[gpu]

# 3. Within VSCode:
#   a. Open a notebook with a GPU model, e.g., examples/00_quick_start/wide_deep_movielens.ipynb;
#   b. Select Jupyter kernel;
#   c. Run the notebook.
```

## Setup for Spark

```bash
# 1. Make sure JDK is installed. For example, OpenJDK 11 can be installed using the command
# sudo apt-get install openjdk-11-jdk

# 2. Follow Steps 1-5 in the Getting Started section in README.md to install the package and Jupyter kernel, adding the spark extra to the pip install command:
pip install recommenders[spark]

# 3. Within VSCode:
#   a. Open a notebook with a Spark model, e.g., examples/00_quick_start/als_movielens.ipynb;
#   b. Select Jupyter kernel;
#   c. Run the notebook.
```

## Setup for Azure Databricks

The following instructions were tested on Azure Databricks Runtime 12.2 LTS (Apache Spark version 3.3.2) and 11.3 LTS (Apache Spark version 3.3.0).
As of April 2023, Databricks Runtime 13 is not yet supported, as it is on Python 3.10.

After an Azure Databricks cluster is provisioned:
```bash
# 1. Go to the "Compute" tab on the left of the page, click on the provisioned cluster and then click on "Libraries".
# 2. Click the "Install new" button.
# 3. In the popup window, select "PyPI" as the library source. Enter "recommenders[examples]" as the package name. Click "Install" to install the package.
```

### Prepare Azure Databricks for Operationalization

This repository includes an end-to-end example notebook that uses Azure Databricks to estimate a recommendation model using matrix factorization with Alternating Least Squares, writes pre-computed recommendations to Azure Cosmos DB, and then creates a real-time scoring service that retrieves the recommendations from Cosmos DB. In order to execute that [notebook](examples/05_operationalize/als_movie_o16n.ipynb), you must install the Recommenders repository as a library (as described above), **AND** you must also install some additional dependencies.
With the *Quick install* method, you just need to pass an additional option to the [installation script](tools/databricks_install.py).
Quick install

This option uses the installation script to do the setup. Just run the installation script with an additional option. If you have already run the script once to upload and install the `Recommenders.egg` library, you can also add an `--overwrite` option:

```{shell}
python tools/databricks_install.py --overwrite --prepare-o16n
```

This script does all of the steps described in the *Manual setup* section below.
Manual setup

You must install three packages as libraries from PyPI:

* `azure-cli==2.0.56`
* `azureml-sdk[databricks]==1.0.8`
* `pydocumentdb==2.3.3`

You can follow the instructions [here](https://docs.azuredatabricks.net/user-guide/libraries.html#install-a-library-on-a-cluster) for details on how to install packages from PyPI.

Additionally, you must install the [spark-cosmosdb connector](https://docs.databricks.com/spark/latest/data-sources/azure/cosmosdb-connector.html) on the cluster. The easiest way to do that manually is to:

1. Download the [appropriate jar](https://search.maven.org/remotecontent?filepath=com/azure/cosmos/spark/azure-cosmos-spark_3-1_2-12/4.3.1/azure-cosmos-spark_3-1_2-12-4.3.1.jar) from Maven. **NOTE**: This jar is for Spark versions `3.1.X` and matches the recommended Azure Databricks runtime detailed above. See the [Databricks installation script](https://github.com/microsoft/recommenders/blob/main/tools/databricks_install.py#L45) for other Databricks runtimes.
2. Upload and install the jar by:
   1. Log into your `Azure Databricks` workspace
   2. Select the `Clusters` button on the left.
   3. Select the cluster on which you want to import the library.
   4. Select the `Upload` and `Jar` options, and click in the box that has the text `Drop JAR here` in it.
   5. Navigate to the downloaded `.jar` file, select it, and click `Open`.
   6. Click on `Install`.
   7. Restart the cluster.
## Setup for Experimental

The `xlearn` package has a dependency on `cmake`. If one uses the `xlearn`-related notebooks or scripts, make sure `cmake` is installed on the system. The easiest way to install it on Linux is with apt-get: `sudo apt-get install -y build-essential cmake`. Detailed instructions for installing `cmake` from source can be found [here](https://cmake.org/install/).

## Windows-Specific Instructions

For Spark features to work, make sure Java and Spark are installed and the respective environment variables, such as `JAVA_HOME`, `SPARK_HOME` and `HADOOP_HOME`, are set properly. Also make sure the environment variables `PYSPARK_PYTHON` and `PYSPARK_DRIVER_PYTHON` are set to the same Python executable.

## MacOS-Specific Instructions

We recommend using [Homebrew](https://brew.sh/) to install the dependencies on macOS, including conda (please remember to add conda's path to `$PATH`). One may also need to install lightgbm with Homebrew before pip installing the package.

If zsh is used, the extras need to be quoted, e.g. `pip install 'recommenders[gpu]'`.

For Spark features to work, make sure Java and Spark are installed first. Also make sure the environment variables `PYSPARK_PYTHON` and `PYSPARK_DRIVER_PYTHON` are set to the same Python executable.

## Setup for Developers

If you want to contribute to Recommenders, please first read the [Contributing Guide](./CONTRIBUTING.md). You will notice that our development branch is `staging`.

To start developing, you need to install the latest `staging` branch locally, along with the `dev` extra and any other extras you want. For example, to start developing with GPU models, you can use the following commands:

```bash
git checkout staging
pip install -e .[dev,gpu]
```

You can decide which extras you want to install; to install all of them, use the following commands:

```bash
git checkout staging
pip install -e .[all]
```

## Test Environments

Depending on the type of recommender system and the notebook that needs to be run, there are different computational requirements.

Currently, tests are run on **Python CPU** (the base environment), **Python GPU** (corresponding to the `[gpu]` extra above) and **PySpark** (corresponding to the `[spark]` extra above).

Another way is to build a Docker image and use the functions inside a [Docker container](#setup-guide-for-docker).

Another alternative is to run all the recommender utilities directly from a local copy of the source code. This requires installing all the necessary dependencies from Anaconda and PyPI. For instructions on how to do this, see [this guide](conda.md).

## Setup for Making a Release

The process of making a new release and publishing it to [PyPI](https://pypi.org/project/recommenders/) is as follows:

First, make sure that the tag you want to add, e.g. `0.6.0`, is added in [`recommenders/__init__.py`](recommenders/__init__.py). Follow the [contribution guideline](CONTRIBUTING.md) to add the change.

1. Make sure that the code in `main` passes all the tests (unit and nightly tests).
1. Create a tag with the version number: e.g. `git tag -a 0.6.0 -m "Recommenders 0.6.0"`.
1. Push the tag to the remote server: `git push origin 0.6.0`.
1. When the new tag is pushed, a release pipeline is executed. This pipeline runs all the tests again (PR gate and nightly builds) and generates a wheel and a tar.gz, which are uploaded to a [GitHub draft release](https://github.com/microsoft/recommenders/releases).
1. Fill in the draft release with all the recent changes in the code.
1. Download the wheel and tar.gz locally; these files shouldn't have any bugs, since they passed all the tests.
1. Install twine: `pip install twine`
1. Publish the wheel and tar.gz to PyPI: `twine upload recommenders*`

diff --git a/TEMP.html b/TEMP.html new file mode 100644 index 0000000000..383437324d --- /dev/null +++ b/TEMP.html @@ -0,0 +1,459 @@
Placeholder — Recommenders documentation
\ No newline at end of file
diff --git a/_modules/index.html b/_modules/index.html new file mode 100644 index 0000000000..e82774021d --- /dev/null +++ b/_modules/index.html @@ -0,0 +1,466 @@
Overview: module code — Recommenders documentation
All modules for which code is available

\ No newline at end of file
diff --git a/_modules/recommenders/datasets/amazon_reviews.html b/_modules/recommenders/datasets/amazon_reviews.html new file mode 100644 index 0000000000..35b959495f --- /dev/null +++ b/_modules/recommenders/datasets/amazon_reviews.html @@ -0,0 +1,939 @@
recommenders.datasets.amazon_reviews — Recommenders documentation
Source code for recommenders.datasets.amazon_reviews

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import os
+import shutil
+import pandas as pd
+import gzip
+import random
+import logging
+import _pickle as cPickle
+
+from recommenders.utils.constants import SEED
+from recommenders.datasets.download_utils import maybe_download
+
+
+random.seed(SEED)
+logger = logging.getLogger()
+
+
+
[docs]def get_review_data(reviews_file): + """Downloads the Amazon review data (only), prepares it in the required format + and stores it in the same location + + Args: + reviews_file (str): Filename for downloaded reviews dataset. + """ + reviews_name = reviews_file.split("/")[-1] # *.json (for url) + download_and_extract(reviews_name, reviews_file) + reviews_output = _reviews_preprocessing(reviews_file) + return reviews_output
+ + +
[docs]def data_preprocessing( + reviews_file, + meta_file, + train_file, + valid_file, + test_file, + user_vocab, + item_vocab, + cate_vocab, + sample_rate=0.01, + valid_num_ngs=4, + test_num_ngs=9, + is_history_expanding=True, +): + """Create data for training, validation and testing from original dataset + + Args: + reviews_file (str): Reviews dataset downloaded from former operations. + meta_file (str): Meta dataset downloaded from former operations. + """ + reviews_output = _reviews_preprocessing(reviews_file) + meta_output = _meta_preprocessing(meta_file) + instance_output = _create_instance(reviews_output, meta_output) + _create_item2cate(instance_output) + sampled_instance_file = _get_sampled_data(instance_output, sample_rate=sample_rate) + preprocessed_output = _data_processing(sampled_instance_file) + if is_history_expanding: + _data_generating(preprocessed_output, train_file, valid_file, test_file) + else: + _data_generating_no_history_expanding( + preprocessed_output, train_file, valid_file, test_file + ) + _create_vocab(train_file, user_vocab, item_vocab, cate_vocab) + _negative_sampling_offline( + sampled_instance_file, valid_file, test_file, valid_num_ngs, test_num_ngs + )
+ + +def _create_vocab(train_file, user_vocab, item_vocab, cate_vocab): + + f_train = open(train_file, "r") + + user_dict = {} + item_dict = {} + cat_dict = {} + + logger.info("vocab generating...") + for line in f_train: + arr = line.strip("\n").split("\t") + uid = arr[1] + mid = arr[2] + cat = arr[3] + mid_list = arr[5] + cat_list = arr[6] + + if uid not in user_dict: + user_dict[uid] = 0 + user_dict[uid] += 1 + if mid not in item_dict: + item_dict[mid] = 0 + item_dict[mid] += 1 + if cat not in cat_dict: + cat_dict[cat] = 0 + cat_dict[cat] += 1 + if len(mid_list) == 0: + continue + for m in mid_list.split(","): + if m not in item_dict: + item_dict[m] = 0 + item_dict[m] += 1 + for c in cat_list.split(","): + if c not in cat_dict: + cat_dict[c] = 0 + cat_dict[c] += 1 + + sorted_user_dict = sorted(user_dict.items(), key=lambda x: x[1], reverse=True) + sorted_item_dict = sorted(item_dict.items(), key=lambda x: x[1], reverse=True) + sorted_cat_dict = sorted(cat_dict.items(), key=lambda x: x[1], reverse=True) + + uid_voc = {} + index = 0 + for key, value in sorted_user_dict: + uid_voc[key] = index + index += 1 + + mid_voc = {} + mid_voc["default_mid"] = 0 + index = 1 + for key, value in sorted_item_dict: + mid_voc[key] = index + index += 1 + + cat_voc = {} + cat_voc["default_cat"] = 0 + index = 1 + for key, value in sorted_cat_dict: + cat_voc[key] = index + index += 1 + + cPickle.dump(uid_voc, open(user_vocab, "wb")) + cPickle.dump(mid_voc, open(item_vocab, "wb")) + cPickle.dump(cat_voc, open(cate_vocab, "wb")) + + +def _negative_sampling_offline( + instance_input_file, valid_file, test_file, valid_neg_nums=4, test_neg_nums=49 +): + + columns = ["label", "user_id", "item_id", "timestamp", "cate_id"] + ns_df = pd.read_csv(instance_input_file, sep="\t", names=columns) + items_with_popular = list(ns_df["item_id"]) + + global item2cate + + # valid negative sampling + logger.info("start valid negative sampling") + with open(valid_file, "r") as f: + valid_lines = f.readlines() + write_valid = open(valid_file, "w") + for line in valid_lines: + write_valid.write(line) + words = line.strip().split("\t") + positive_item = words[2] + count = 0 + neg_items = set() + while count < valid_neg_nums: + neg_item = random.choice(items_with_popular) + if neg_item == positive_item or neg_item in neg_items: + continue + count += 1 + neg_items.add(neg_item) + words[0] = "0" + words[2] = neg_item + words[3] = item2cate[neg_item] + write_valid.write("\t".join(words) + "\n") + + # test negative sampling + logger.info("start test negative sampling") + with open(test_file, "r") as f: + test_lines = f.readlines() + write_test = open(test_file, "w") + for line in test_lines: + write_test.write(line) + words = line.strip().split("\t") + positive_item = words[2] + count = 0 + neg_items = set() + while count < test_neg_nums: + neg_item = random.choice(items_with_popular) + if neg_item == positive_item or neg_item in neg_items: + continue + count += 1 + neg_items.add(neg_item) + words[0] = "0" + words[2] = neg_item + words[3] = item2cate[neg_item] + write_test.write("\t".join(words) + "\n") + + +def _data_generating(input_file, train_file, valid_file, test_file, min_sequence=1): + """produce train, valid and test file from processed_output file + Each user's behavior sequence will be unfolded and produce multiple lines in trian file. 
+ Like, user's behavior sequence: 12345, and this function will write into train file: + 1, 12, 123, 1234, 12345 + """ + f_input = open(input_file, "r") + f_train = open(train_file, "w") + f_valid = open(valid_file, "w") + f_test = open(test_file, "w") + logger.info("data generating...") + last_user_id = None + for line in f_input: + line_split = line.strip().split("\t") + tfile = line_split[0] + label = int(line_split[1]) + user_id = line_split[2] + movie_id = line_split[3] + date_time = line_split[4] + category = line_split[5] + + if tfile == "train": + fo = f_train + elif tfile == "valid": + fo = f_valid + elif tfile == "test": + fo = f_test + if user_id != last_user_id: + movie_id_list = [] + cate_list = [] + dt_list = [] + else: + history_clk_num = len(movie_id_list) + cat_str = "" + mid_str = "" + dt_str = "" + for c1 in cate_list: + cat_str += c1 + "," + for mid in movie_id_list: + mid_str += mid + "," + for dt_time in dt_list: + dt_str += dt_time + "," + if len(cat_str) > 0: + cat_str = cat_str[:-1] + if len(mid_str) > 0: + mid_str = mid_str[:-1] + if len(dt_str) > 0: + dt_str = dt_str[:-1] + if history_clk_num >= min_sequence: + fo.write( + line_split[1] + + "\t" + + user_id + + "\t" + + movie_id + + "\t" + + category + + "\t" + + date_time + + "\t" + + mid_str + + "\t" + + cat_str + + "\t" + + dt_str + + "\n" + ) + last_user_id = user_id + if label: + movie_id_list.append(movie_id) + cate_list.append(category) + dt_list.append(date_time) + + +def _data_generating_no_history_expanding( + input_file, train_file, valid_file, test_file, min_sequence=1 +): + """Produce train, valid and test file from processed_output file + Each user's behavior sequence will only produce one line in train file. + Like, user's behavior sequence: 12345, and this function will write into train file: 12345 + """ + f_input = open(input_file, "r") + f_train = open(train_file, "w") + f_valid = open(valid_file, "w") + f_test = open(test_file, "w") + logger.info("data generating...") + + last_user_id = None + last_movie_id = None + last_category = None + last_datetime = None + last_tfile = None + for line in f_input: + line_split = line.strip().split("\t") + tfile = line_split[0] + label = int(line_split[1]) + user_id = line_split[2] + movie_id = line_split[3] + date_time = line_split[4] + category = line_split[5] + + if last_tfile == "train": + fo = f_train + elif last_tfile == "valid": + fo = f_valid + elif last_tfile == "test": + fo = f_test + if user_id != last_user_id or tfile == "valid" or tfile == "test": + if last_user_id is not None: + history_clk_num = len( + movie_id_list # noqa: F821 undefined name 'movie_id_list' + ) + cat_str = "" + mid_str = "" + dt_str = "" + for c1 in cate_list[:-1]: # noqa: F821 undefined name 'cate_list' + cat_str += c1 + "," + for mid in movie_id_list[ # noqa: F821 undefined name 'movie_id_list' + :-1 + ]: + mid_str += mid + "," + for dt_time in dt_list[:-1]: # noqa: F821 undefined name 'dt_list' + dt_str += dt_time + "," + if len(cat_str) > 0: + cat_str = cat_str[:-1] + if len(mid_str) > 0: + mid_str = mid_str[:-1] + if len(dt_str) > 0: + dt_str = dt_str[:-1] + if history_clk_num > min_sequence: + fo.write( + line_split[1] + + "\t" + + last_user_id + + "\t" + + last_movie_id + + "\t" + + last_category + + "\t" + + last_datetime + + "\t" + + mid_str + + "\t" + + cat_str + + "\t" + + dt_str + + "\n" + ) + if tfile == "train" or last_user_id is None: + movie_id_list = [] + cate_list = [] + dt_list = [] + last_user_id = user_id + last_movie_id = movie_id + last_category = 
category + last_datetime = date_time + last_tfile = tfile + if label: + movie_id_list.append(movie_id) + cate_list.append(category) + dt_list.append(date_time) + + +def _create_item2cate(instance_file): + logger.info("creating item2cate dict") + global item2cate + instance_df = pd.read_csv( + instance_file, + sep="\t", + names=["label", "user_id", "item_id", "timestamp", "cate_id"], + ) + item2cate = instance_df.set_index("item_id")["cate_id"].to_dict() + + +def _get_sampled_data(instance_file, sample_rate): + logger.info("getting sampled data...") + global item2cate + output_file = instance_file + "_" + str(sample_rate) + columns = ["label", "user_id", "item_id", "timestamp", "cate_id"] + ns_df = pd.read_csv(instance_file, sep="\t", names=columns) + items_num = ns_df["item_id"].nunique() + items_with_popular = list(ns_df["item_id"]) + items_sample, count = set(), 0 + while count < int(items_num * sample_rate): + random_item = random.choice(items_with_popular) + if random_item not in items_sample: + items_sample.add(random_item) + count += 1 + ns_df_sample = ns_df[ns_df["item_id"].isin(items_sample)] + ns_df_sample.to_csv(output_file, sep="\t", index=None, header=None) + return output_file + + +def _meta_preprocessing(meta_readfile): + logger.info("start meta preprocessing...") + meta_writefile = meta_readfile + "_output" + meta_r = open(meta_readfile, "r") + meta_w = open(meta_writefile, "w") + for line in meta_r: + line_new = eval(line) + meta_w.write(line_new["asin"] + "\t" + line_new["categories"][0][-1] + "\n") + meta_r.close() + meta_w.close() + return meta_writefile + + +def _reviews_preprocessing(reviews_readfile): + logger.info("start reviews preprocessing...") + reviews_writefile = reviews_readfile + "_output" + reviews_r = open(reviews_readfile, "r") + reviews_w = open(reviews_writefile, "w") + for line in reviews_r: + line_new = eval(line.strip()) + reviews_w.write( + str(line_new["reviewerID"]) + + "\t" + + str(line_new["asin"]) + + "\t" + + str(line_new["unixReviewTime"]) + + "\n" + ) + reviews_r.close() + reviews_w.close() + return reviews_writefile + + +def _create_instance(reviews_file, meta_file): + logger.info("start create instances...") + dirs, _ = os.path.split(reviews_file) + output_file = os.path.join(dirs, "instance_output") + + f_reviews = open(reviews_file, "r") + user_dict = {} + item_list = [] + for line in f_reviews: + line = line.strip() + reviews_things = line.split("\t") + if reviews_things[0] not in user_dict: + user_dict[reviews_things[0]] = [] + user_dict[reviews_things[0]].append((line, float(reviews_things[-1]))) + item_list.append(reviews_things[1]) + + f_meta = open(meta_file, "r") + meta_dict = {} + for line in f_meta: + line = line.strip() + meta_things = line.split("\t") + if meta_things[0] not in meta_dict: + meta_dict[meta_things[0]] = meta_things[1] + + f_output = open(output_file, "w") + for user_behavior in user_dict: + sorted_user_behavior = sorted(user_dict[user_behavior], key=lambda x: x[1]) + for line, _ in sorted_user_behavior: + user_things = line.split("\t") + asin = user_things[1] + if asin in meta_dict: + f_output.write("1" + "\t" + line + "\t" + meta_dict[asin] + "\n") + else: + f_output.write("1" + "\t" + line + "\t" + "default_cat" + "\n") + + f_reviews.close() + f_meta.close() + f_output.close() + return output_file + + +def _data_processing(input_file): + logger.info("start data processing...") + dirs, _ = os.path.split(input_file) + output_file = os.path.join(dirs, "preprocessed_output") + + f_input = open(input_file, "r") + 
f_output = open(output_file, "w") + user_count = {} + for line in f_input: + line = line.strip() + user = line.split("\t")[1] + if user not in user_count: + user_count[user] = 0 + user_count[user] += 1 + f_input.seek(0) + i = 0 + last_user = None + for line in f_input: + line = line.strip() + user = line.split("\t")[1] + if user == last_user: + if i < user_count[user] - 2: + f_output.write("train" + "\t" + line + "\n") + elif i < user_count[user] - 1: + f_output.write("valid" + "\t" + line + "\n") + else: + f_output.write("test" + "\t" + line + "\n") + else: + last_user = user + i = 0 + if i < user_count[user] - 2: + f_output.write("train" + "\t" + line + "\n") + elif i < user_count[user] - 1: + f_output.write("valid" + "\t" + line + "\n") + else: + f_output.write("test" + "\t" + line + "\n") + i += 1 + return output_file + + +
[docs]def download_and_extract(name, dest_path): + """Downloads and extracts Amazon reviews and meta datafiles if they don’t already exist + + Args: + name (str): Category of reviews. + dest_path (str): File path for the downloaded file. + + Returns: + str: File path for the extracted file. + """ + dirs, _ = os.path.split(dest_path) + if not os.path.exists(dirs): + os.makedirs(dirs) + + file_path = os.path.join(dirs, name) + if not os.path.exists(file_path): + _download_reviews(name, dest_path) + _extract_reviews(file_path, dest_path) + + return file_path
+ + +def _download_reviews(name, dest_path): + """Downloads Amazon reviews datafile. + + Args: + name (str): Category of reviews + dest_path (str): File path for the downloaded file + """ + + url = ( + "http://snap.stanford.edu/data/amazon/productGraph/categoryFiles/" + + name + + ".gz" + ) + + dirs, file = os.path.split(dest_path) + maybe_download(url, file + ".gz", work_directory=dirs) + + +def _extract_reviews(file_path, zip_path): + """Extract Amazon reviews and meta datafiles from the raw zip files. + + To extract all files, + use ZipFile's extractall(path) instead. + + Args: + file_path (str): Destination path for datafile + zip_path (str): zipfile path + """ + with gzip.open(zip_path + ".gz", "rb") as zf, open(file_path, "wb") as f: + shutil.copyfileobj(zf, f) +
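As a usage illustration, here is a minimal sketch of how these helpers chain together. The category name and output paths are illustrative assumptions, not fixed by the module; the splits and vocabularies are written to the paths you pass in.

```python
import os
from recommenders.datasets.amazon_reviews import download_and_extract, data_preprocessing

data_dir = "amazon_data"  # hypothetical working directory
reviews_file = os.path.join(data_dir, "reviews_Movies_and_TV_5.json")
meta_file = os.path.join(data_dir, "meta_Movies_and_TV.json")

# Download and extract the raw gzipped files if they are not present yet
download_and_extract("reviews_Movies_and_TV_5.json", reviews_file)
download_and_extract("meta_Movies_and_TV.json", meta_file)

# Build train/valid/test splits plus user/item/category vocabularies
data_preprocessing(
    reviews_file,
    meta_file,
    train_file=os.path.join(data_dir, "train_data"),
    valid_file=os.path.join(data_dir, "valid_data"),
    test_file=os.path.join(data_dir, "test_data"),
    user_vocab=os.path.join(data_dir, "user_vocab.pkl"),
    item_vocab=os.path.join(data_dir, "item_vocab.pkl"),
    cate_vocab=os.path.join(data_dir, "category_vocab.pkl"),
    sample_rate=0.01,
    valid_num_ngs=4,
    test_num_ngs=9,
)
```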
\ No newline at end of file
diff --git a/_modules/recommenders/datasets/covid_utils.html b/_modules/recommenders/datasets/covid_utils.html new file mode 100644 index 0000000000..796ac7142f --- /dev/null +++ b/_modules/recommenders/datasets/covid_utils.html @@ -0,0 +1,580 @@
recommenders.datasets.covid_utils — Recommenders documentation
Source code for recommenders.datasets.covid_utils

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import numpy as np
+import pandas as pd
+import requests
+
+
+
[docs]def load_pandas_df( + azure_storage_account_name="azureopendatastorage", + azure_storage_sas_token="", + container_name="covid19temp", + metadata_filename="metadata.csv", +): + """Loads the Azure Open Research COVID-19 dataset as a pd.DataFrame. + + The Azure COVID-19 Open Research Dataset may be found at https://azure.microsoft.com/en-us/services/open-datasets/catalog/covid-19-open-research/ + + Args: + azure_storage_account_name (str): Azure storage account name. + azure_storage_sas_token (str): Azure storage SAS token. + container_name (str): Azure storage container name. + metadata_filename (str): Name of file containing top-level metadata for the dataset. + + Returns: + metadata (pandas.DataFrame): Metadata dataframe. + """ + + # Load into dataframe + uri = "https://{acct}.blob.core.windows.net/{container}/{filename}{sas}".format( + acct=azure_storage_account_name, + container=container_name, + filename=metadata_filename, + sas=azure_storage_sas_token, + ) + return pd.read_csv(uri)
+ + +
[docs]def remove_duplicates(df, cols): + """Remove duplicated entries. + + Args: + df (pd.DataFrame): Pandas dataframe. + cols (list of str): Name of columns in which to look for duplicates. + + Returns: + df (pandas.DataFrame): Pandas dataframe with duplicate rows dropped. + + """ + for col in cols: + # Reset index + df = df.reset_index(drop=True) + + # Find where the identifier variable is duplicated + dup_rows = np.where(df.duplicated([col]))[0] + + # Drop duplicated rows + df = df.drop(dup_rows) + + return df
+ + +
[docs]def remove_nan(df, cols): + """Remove rows with NaN values in specified column. + + Args: + df (pandas.DataFrame): Pandas dataframe. + cols (list of str): Name of columns in which to look for NaN. + + Returns: + df (pandas.DataFrame): Pandas dataframe with invalid rows dropped. + + """ + for col in cols: + # Convert any empty string cells to nan + df[col].replace("", np.nan, inplace=True) + + # Remove NaN rows + df = df[df[col].notna()] + + return df
+ + +
[docs]def clean_dataframe(df): + """Clean up the dataframe. + + Args: + df (pandas.DataFrame): Pandas dataframe. + + Returns: + df (pandas.DataFrame): Cleaned pandas dataframe. + """ + + # Remove duplicated rows + cols = ["cord_uid", "doi"] + df = remove_duplicates(df, cols) + + # Remove rows without values in specified columns + cols = ["cord_uid", "doi", "title", "license", "url"] + df = remove_nan(df, cols) + + return df
+ + +
[docs]def retrieve_text( + entry, + container_name, + azure_storage_account_name="azureopendatastorage", + azure_storage_sas_token="", +): + """Retrieve body text from article of interest. + + Args: + entry (pd.Series): A single row from the dataframe (df.iloc[n]). + container_name (str): Azure storage container name. + azure_storage_account_name (str): Azure storage account name. + azure_storage_sas_token (str): Azure storage SAS token. + + Returns: + text (str): Full text of the blob as a single string. + """ + + try: + filename = entry["pdf_json_files"] or entry["pmc_json_files"] + + # Extract text + uri = "https://{acct}.blob.core.windows.net/{container}/{filename}{sas}".format( + acct=azure_storage_account_name, + container=container_name, + filename=filename, + sas=azure_storage_sas_token, + ) + + data = requests.get(uri, headers={"Content-type": "application/json"}).json() + text = " ".join([paragraph["text"] for paragraph in data["body_text"]]) + + except Exception: + text = "" + + return text
+ + +
[docs]def get_public_domain_text( + df, + container_name, + azure_storage_account_name="azureopendatastorage", + azure_storage_sas_token="", +): + """Get all public domain text. + + Args: + df (pandas.DataFrame): Metadata dataframe for public domain text. + container_name (str): Azure storage container name. + azure_storage_account_name (str): Azure storage account name. + azure_storage_sas_token (str): Azure storage SAS token. + + Returns: + df_full (pandas.DataFrame): Dataframe with select metadata and full article text. + """ + # reset index + df = df.reset_index(drop=True) + + # Add in full_text + df["full_text"] = df.apply( + lambda row: retrieve_text( + row, container_name, azure_storage_account_name, azure_storage_sas_token + ), + axis=1, + ) + + # Remove rows with empty full_text + empty_rows = np.where(df["full_text"] == "")[0] + df = df.drop(empty_rows) + + # Only keep columns of interest + df_full = df[ + [ + "cord_uid", + "doi", + "title", + "publish_time", + "authors", + "journal", + "url", + "abstract", + "full_text", + ] + ] + df_full = df_full.reset_index() + + return df_full
+
\ No newline at end of file
diff --git a/_modules/recommenders/datasets/criteo.html b/_modules/recommenders/datasets/criteo.html new file mode 100644 index 0000000000..495c748472 --- /dev/null +++ b/_modules/recommenders/datasets/criteo.html @@ -0,0 +1,592 @@
recommenders.datasets.criteo — Recommenders documentation
Source code for recommenders.datasets.criteo

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+
+import pandas as pd
+import os
+import tarfile
+
+try:
+    from pyspark.sql.types import StructType, StructField, IntegerType, StringType
+except ImportError:
+    pass  # so the environment without spark doesn't break
+
+from recommenders.datasets.download_utils import maybe_download, download_path
+from recommenders.utils.notebook_utils import is_databricks
+
+
+CRITEO_URL = {
+    "full": "https://ndownloader.figshare.com/files/10082655",
+    "sample": "http://labs.criteo.com/wp-content/uploads/2015/04/dac_sample.tar.gz",
+}
+DEFAULT_HEADER = (
+    ["label"]
+    + ["int{0:02d}".format(i) for i in range(13)]
+    + ["cat{0:02d}".format(i) for i in range(26)]
+)
+
+
+
[docs]def load_pandas_df(size="sample", local_cache_path=None, header=DEFAULT_HEADER): + """Loads the Criteo DAC dataset as `pandas.DataFrame`. This function downloads, untars, and loads the dataset. + + The dataset consists of a portion of Criteo’s traffic over a period + of 24 days. Each row corresponds to a display ad served by Criteo and the first + column indicates whether this ad has been clicked or not. + + There are 13 features taking integer values (mostly count features) and 26 + categorical features. The values of the categorical features have been hashed + onto 32 bits for anonymization purposes. + + The schema is: + + .. code-block:: python + + <label> <integer feature 1> ... <integer feature 13> <categorical feature 1> ... <categorical feature 26> + + More details (need to accept user terms to see the information): + http://labs.criteo.com/2013/12/download-terabyte-click-logs/ + + Args: + size (str): Dataset size. It can be "sample" or "full". + local_cache_path (str): Path where to cache the tar.gz file locally + header (list): Dataset header names. + + Returns: + pandas.DataFrame: Criteo DAC sample dataset. + """ + with download_path(local_cache_path) as path: + filepath = download_criteo(size, path) + filepath = extract_criteo(size, filepath) + df = pd.read_csv(filepath, sep="\t", header=None, names=header) + return df
+ + +
[docs]def load_spark_df( + spark, + size="sample", + header=DEFAULT_HEADER, + local_cache_path=None, + dbfs_datapath="dbfs:/FileStore/dac", + dbutils=None, +): + """Loads the Criteo DAC dataset as `pySpark.DataFrame`. + + The dataset consists of a portion of Criteo’s traffic over a period + of 24 days. Each row corresponds to a display ad served by Criteo and the first + column indicates whether this ad has been clicked or not. + + There are 13 features taking integer values (mostly count features) and 26 + categorical features. The values of the categorical features have been hashed + onto 32 bits for anonymization purposes. + + The schema is: + + .. code-block:: python + + <label> <integer feature 1> ... <integer feature 13> <categorical feature 1> ... <categorical feature 26> + + More details (need to accept user terms to see the information): + http://labs.criteo.com/2013/12/download-terabyte-click-logs/ + + Args: + spark (pySpark.SparkSession): Spark session. + size (str): Dataset size. It can be "sample" or "full". + local_cache_path (str): Path where to cache the tar.gz file locally. + header (list): Dataset header names. + dbfs_datapath (str): Where to store the extracted files on Databricks. + dbutils (Databricks.dbutils): Databricks utility object. + + Returns: + pyspark.sql.DataFrame: Criteo DAC training dataset. + """ + with download_path(local_cache_path) as path: + filepath = download_criteo(size, path) + filepath = extract_criteo(size, filepath) + + if is_databricks(): + try: + # Driver node's file path + node_path = "file:" + filepath + # needs to be on dbfs to load + dbutils.fs.cp(node_path, dbfs_datapath, recurse=True) + path = dbfs_datapath + except Exception: + raise ValueError( + "To use on a Databricks notebook, dbutils object should be passed as an argument" + ) + else: + path = filepath + + schema = get_spark_schema(header) + df = spark.read.csv(path, schema=schema, sep="\t", header=False) + df.cache().count() # trigger execution to overcome spark's lazy evaluation + return df
+ + +
[docs]def download_criteo(size="sample", work_directory="."): + """Download criteo dataset as a compressed file. + + Args: + size (str): Size of criteo dataset. It can be "full" or "sample". + work_directory (str): Working directory. + + Returns: + str: Path of the downloaded file. + + """ + url = CRITEO_URL[size] + return maybe_download(url, work_directory=work_directory)
+ + +
[docs]def extract_criteo(size, compressed_file, path=None): + """Extract Criteo dataset tar. + + Args: + size (str): Size of Criteo dataset. It can be "full" or "sample". + compressed_file (str): Path to compressed file. + path (str): Path to extract the file. + + Returns: + str: Path to the extracted file. + + """ + if path is None: + folder = os.path.dirname(compressed_file) + extracted_dir = os.path.join(folder, "dac") + else: + extracted_dir = path + + with tarfile.open(compressed_file) as tar: + + def is_within_directory(directory, target): + + abs_directory = os.path.abspath(directory) + abs_target = os.path.abspath(target) + + prefix = os.path.commonprefix([abs_directory, abs_target]) + + return prefix == abs_directory + + def safe_extract(tar, path=".", members=None, *, numeric_owner=False): + + for member in tar.getmembers(): + member_path = os.path.join(path, member.name) + if not is_within_directory(path, member_path): + raise Exception("Attempted Path Traversal in Tar File") + + tar.extractall(path, members, numeric_owner=numeric_owner) + + safe_extract(tar, extracted_dir) + + filename_selector = {"sample": "dac_sample.txt", "full": "train.txt"} + return os.path.join(extracted_dir, filename_selector[size])
+ + +
[docs]def get_spark_schema(header=DEFAULT_HEADER): + """Get Spark schema from header. + + Args: + header (list): Dataset header names. + + Returns: + pyspark.sql.types.StructType: Spark schema. + """ + # create schema + schema = StructType() + # do label + ints + n_ints = 14 + for i in range(n_ints): + schema.add(StructField(header[i], IntegerType())) + # do categoricals + for i in range(26): + schema.add(StructField(header[i + n_ints], StringType())) + return schema
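For instance, loading the sample split into pandas, and into Spark when a session is available, can look like the following sketch. `start_or_get_spark` is the session helper from `recommenders.utils.spark_utils`, and the Spark part assumes `pyspark` is installed.

```python
from recommenders.datasets.criteo import load_pandas_df, load_spark_df
from recommenders.utils.spark_utils import start_or_get_spark

# Downloads, extracts and parses the sample split (label + 13 int + 26 cat columns)
df = load_pandas_df(size="sample")
print(df.shape)

# Spark variant, assuming pyspark is available
spark = start_or_get_spark("criteo")
spark_df = load_spark_df(spark, size="sample")
spark_df.show(5)
```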
\ No newline at end of file
diff --git a/_modules/recommenders/datasets/download_utils.html b/_modules/recommenders/datasets/download_utils.html new file mode 100644 index 0000000000..29cef9d0e3 --- /dev/null +++ b/_modules/recommenders/datasets/download_utils.html @@ -0,0 +1,492 @@
recommenders.datasets.download_utils — Recommenders documentation
Source code for recommenders.datasets.download_utils

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import os
+import logging
+import requests
+import math
+import zipfile
+from contextlib import contextmanager
+from tempfile import TemporaryDirectory
+from tqdm import tqdm
+from retrying import retry
+
+
+log = logging.getLogger(__name__)
+
+
+
[docs]@retry(wait_random_min=1000, wait_random_max=5000, stop_max_attempt_number=5) +def maybe_download(url, filename=None, work_directory=".", expected_bytes=None): + """Download a file if it is not already downloaded. + + Args: + url (str): URL of the file to download. + filename (str): File name. + work_directory (str): Working directory. + expected_bytes (int): Expected file size in bytes. + + Returns: + str: File path of the file downloaded. + """ + if filename is None: + filename = url.split("/")[-1] + os.makedirs(work_directory, exist_ok=True) + filepath = os.path.join(work_directory, filename) + if not os.path.exists(filepath): + r = requests.get(url, stream=True) + if r.status_code == 200: + log.info(f"Downloading {url}") + total_size = int(r.headers.get("content-length", 0)) + block_size = 1024 + num_iterables = math.ceil(total_size / block_size) + with open(filepath, "wb") as file: + for data in tqdm( + r.iter_content(block_size), + total=num_iterables, + unit="KB", + unit_scale=True, + ): + file.write(data) + else: + log.error(f"Problem downloading {url}") + r.raise_for_status() + else: + log.info(f"File {filepath} already downloaded") + if expected_bytes is not None: + statinfo = os.stat(filepath) + if statinfo.st_size != expected_bytes: + os.remove(filepath) + raise IOError(f"Failed to verify {filepath}") + + return filepath
+ + +
[docs]@contextmanager +def download_path(path=None): + """Return a path to download data. If `path=None`, then it yields a temporary path that is eventually deleted, + otherwise the real path of the input. + + Args: + path (str): Path to download data. + + Returns: + str: Real path where the data is stored. + + Examples: + >>> with download_path() as path: + >>> ... maybe_download(url="http://example.com/file.zip", work_directory=path) + + """ + if path is None: + tmp_dir = TemporaryDirectory() + try: + yield tmp_dir.name + finally: + tmp_dir.cleanup() + else: + path = os.path.realpath(path) + yield path
+ + +
[docs]def unzip_file(zip_src, dst_dir, clean_zip_file=False): + """Unzip a file + + Args: + zip_src (str): Zip file. + dst_dir (str): Destination folder. + clean_zip_file (bool): Whether or not to clean the zip file. + """ + fz = zipfile.ZipFile(zip_src, "r") + for file in fz.namelist(): + fz.extract(file, dst_dir) + if clean_zip_file: + os.remove(zip_src)
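Taken together, these helpers compose as in the following sketch. The URL points at the public MovieLens 100k archive and is only an example; with no argument, `download_path` yields a temporary directory that is cleaned up on exit.

```python
from recommenders.datasets.download_utils import (
    download_path,
    maybe_download,
    unzip_file,
)

# Download into a temporary directory and unzip it there
with download_path() as path:
    zip_path = maybe_download(
        url="https://files.grouplens.org/datasets/movielens/ml-100k.zip",
        work_directory=path,
    )
    unzip_file(zip_path, path, clean_zip_file=False)
```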
\ No newline at end of file
diff --git a/_modules/recommenders/datasets/mind.html b/_modules/recommenders/datasets/mind.html new file mode 100644 index 0000000000..5acb30f896 --- /dev/null +++ b/_modules/recommenders/datasets/mind.html @@ -0,0 +1,831 @@
recommenders.datasets.mind — Recommenders documentation

Source code for recommenders.datasets.mind

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import os
+import random
+import logging
+import json
+import numpy as np
+import re
+from tqdm import tqdm
+from nltk.tokenize import RegexpTokenizer
+
+from recommenders.datasets.download_utils import (
+    maybe_download,
+    download_path,
+    unzip_file,
+)
+
+
+URL_MIND_LARGE_TRAIN = (
+    "https://mind201910small.blob.core.windows.net/release/MINDlarge_train.zip"
+)
+URL_MIND_LARGE_VALID = (
+    "https://mind201910small.blob.core.windows.net/release/MINDlarge_dev.zip"
+)
+URL_MIND_SMALL_TRAIN = (
+    "https://mind201910small.blob.core.windows.net/release/MINDsmall_train.zip"
+)
+URL_MIND_SMALL_VALID = (
+    "https://mind201910small.blob.core.windows.net/release/MINDsmall_dev.zip"
+)
+URL_MIND_DEMO_TRAIN = (
+    "https://recodatasets.z20.web.core.windows.net/newsrec/MINDdemo_train.zip"
+)
+URL_MIND_DEMO_VALID = (
+    "https://recodatasets.z20.web.core.windows.net/newsrec/MINDdemo_dev.zip"
+)
+URL_MIND_DEMO_UTILS = (
+    "https://recodatasets.z20.web.core.windows.net/newsrec/MINDdemo_utils.zip"
+)
+
+URL_MIND = {
+    "large": (URL_MIND_LARGE_TRAIN, URL_MIND_LARGE_VALID),
+    "small": (URL_MIND_SMALL_TRAIN, URL_MIND_SMALL_VALID),
+    "demo": (URL_MIND_DEMO_TRAIN, URL_MIND_DEMO_VALID),
+}
+
+logger = logging.getLogger()
+
+
+
[docs]def download_mind(size="small", dest_path=None): + """Download MIND dataset + + Args: + size (str): Dataset size. One of ["small", "large", "demo"] + dest_path (str): Download path. If path is None, it will download the dataset to a temporary path + + Returns: + str, str: Path to train and validation sets. + """ + size_options = ["small", "large", "demo"] + if size not in size_options: + raise ValueError(f"Wrong size option, available options are {size_options}") + url_train, url_valid = URL_MIND[size] + with download_path(dest_path) as path: + train_path = maybe_download(url=url_train, work_directory=path) + valid_path = maybe_download(url=url_valid, work_directory=path) + return train_path, valid_path
+ + +
[docs]def extract_mind( + train_zip, + valid_zip, + train_folder="train", + valid_folder="valid", + clean_zip_file=True, +): + """Extract MIND dataset + + Args: + train_zip (str): Path to train zip file + valid_zip (str): Path to valid zip file + train_folder (str): Destination folder for train set + valid_folder (str): Destination folder for validation set + + Returns: + str, str: Train and validation folders + """ + root_folder = os.path.basename(train_zip) + train_path = os.path.join(root_folder, train_folder) + valid_path = os.path.join(root_folder, valid_folder) + unzip_file(train_zip, train_path, clean_zip_file=clean_zip_file) + unzip_file(valid_zip, valid_path, clean_zip_file=clean_zip_file) + return train_path, valid_path
+ + +
[docs]def read_clickhistory(path, filename): + """Read click history file + + Args: + path (str): Folder path + filename (str): Filename + + Returns: + list, dict: + - A list of user session with user_id, clicks, positive and negative interactions. + - A dictionary with user_id click history. + """ + userid_history = {} + with open(os.path.join(path, filename)) as f: + lines = f.readlines() + sessions = [] + for i in range(len(lines)): + _, userid, imp_time, click, imps = lines[i].strip().split("\t") + clicks = click.split(" ") + pos = [] + neg = [] + imps = imps.split(" ") + for imp in imps: + if imp.split("-")[1] == "1": + pos.append(imp.split("-")[0]) + else: + neg.append(imp.split("-")[0]) + userid_history[userid] = clicks + sessions.append([userid, clicks, pos, neg]) + return sessions, userid_history
+ + +def _newsample(nnn, ratio): + if ratio > len(nnn): + return random.sample(nnn * (ratio // len(nnn) + 1), ratio) + else: + return random.sample(nnn, ratio) + + +
[docs]def get_train_input(session, train_file_path, npratio=4): + """Generate train file. + + Args: + session (list): List of user session with user_id, clicks, positive and negative interactions. + train_file_path (str): Path to file. + npratio (int): Ratio for negative sampling. + """ + fp_train = open(train_file_path, "w", encoding="utf-8") + for sess_id in range(len(session)): + sess = session[sess_id] + userid, _, poss, negs = sess + for i in range(len(poss)): + pos = poss[i] + neg = _newsample(negs, npratio) + fp_train.write("1 " + "train_" + userid + " " + pos + "\n") + for neg_ins in neg: + fp_train.write("0 " + "train_" + userid + " " + neg_ins + "\n") + fp_train.close() + if os.path.isfile(train_file_path): + logger.info(f"Train file {train_file_path} successfully generated") + else: + raise FileNotFoundError(f"Error when generating {train_file_path}")
+ + +
[docs]def get_valid_input(session, valid_file_path): + """Generate validation file. + + Args: + session (list): List of user session with user_id, clicks, positive and negative interactions. + valid_file_path (str): Path to file. + """ + fp_valid = open(valid_file_path, "w", encoding="utf-8") + for sess_id in range(len(session)): + userid, _, poss, negs = session[sess_id] + for i in range(len(poss)): + fp_valid.write( + "1 " + "valid_" + userid + " " + poss[i] + "%" + str(sess_id) + "\n" + ) + for i in range(len(negs)): + fp_valid.write( + "0 " + "valid_" + userid + " " + negs[i] + "%" + str(sess_id) + "\n" + ) + fp_valid.close() + if os.path.isfile(valid_file_path): + logger.info(f"Validation file {valid_file_path} successfully generated") + else: + raise FileNotFoundError(f"Error when generating {valid_file_path}")
+ + +
[docs]def get_user_history(train_history, valid_history, user_history_path): + """Generate user history file. + + Args: + train_history (list): Train history. + valid_history (list): Validation history + user_history_path (str): Path to file. + """ + fp_user_history = open(user_history_path, "w", encoding="utf-8") + for userid in train_history: + fp_user_history.write( + "train_" + userid + " " + ",".join(train_history[userid]) + "\n" + ) + for userid in valid_history: + fp_user_history.write( + "valid_" + userid + " " + ",".join(valid_history[userid]) + "\n" + ) + fp_user_history.close() + if os.path.isfile(user_history_path): + logger.info(f"User history file {user_history_path} successfully generated") + else: + raise FileNotFoundError(f"Error when generating {user_history_path}")
+ + +def _read_news(filepath, news_words, news_entities, tokenizer): + with open(filepath, encoding="utf-8") as f: + lines = f.readlines() + for line in lines: + splitted = line.strip("\n").split("\t") + news_words[splitted[0]] = tokenizer.tokenize(splitted[3].lower()) + news_entities[splitted[0]] = [] + for entity in json.loads(splitted[6]): + news_entities[splitted[0]].append( + (entity["SurfaceForms"], entity["WikidataId"]) + ) + return news_words, news_entities + + +
[docs]def get_words_and_entities(train_news, valid_news): + """Load words and entities + + Args: + train_news (str): News train file. + valid_news (str): News validation file. + + Returns: + dict, dict: Words and entities dictionaries. + """ + news_words = {} + news_entities = {} + tokenizer = RegexpTokenizer(r"\w+") + news_words, news_entities = _read_news( + train_news, news_words, news_entities, tokenizer + ) + news_words, news_entities = _read_news( + valid_news, news_words, news_entities, tokenizer + ) + return news_words, news_entities
+ + +
[docs]def download_and_extract_glove(dest_path): + """Download and extract the Glove embedding + + Args: + dest_path (str): Destination directory path for the downloaded file + + Returns: + str: File path where Glove was extracted. + """ + # url = "http://nlp.stanford.edu/data/glove.6B.zip" + url = "https://huggingface.co/stanfordnlp/glove/resolve/main/glove.6B.zip" + filepath = maybe_download(url=url, work_directory=dest_path) + glove_path = os.path.join(dest_path, "glove") + unzip_file(filepath, glove_path, clean_zip_file=False) + return glove_path
+ + +
[docs]def generate_embeddings( + data_path, + news_words, + news_entities, + train_entities, + valid_entities, + max_sentence=10, + word_embedding_dim=100, +): + """Generate embeddings. + + Args: + data_path (str): Data path. + news_words (dict): News word dictionary. + news_entities (dict): News entity dictionary. + train_entities (str): Train entity file. + valid_entities (str): Validation entity file. + max_sentence (int): Max sentence size. + word_embedding_dim (int): Word embedding dimension. + + Returns: + str, str, str: File paths to news, word and entity embeddings. + """ + embedding_dimensions = [50, 100, 200, 300] + if word_embedding_dim not in embedding_dimensions: + raise ValueError( + f"Wrong embedding dimension, available options are {embedding_dimensions}" + ) + + logger.info("Downloading glove...") + glove_path = download_and_extract_glove(data_path) + + word_set = set() + word_embedding_dict = {} + entity_embedding_dict = {} + + logger.info(f"Loading glove with embedding dimension {word_embedding_dim}...") + glove_file = "glove.6B." + str(word_embedding_dim) + "d.txt" + fp_pretrain_vec = open(os.path.join(glove_path, glove_file), "r", encoding="utf-8") + for line in fp_pretrain_vec: + linesplit = line.split(" ") + word_set.add(linesplit[0]) + word_embedding_dict[linesplit[0]] = np.asarray(list(map(float, linesplit[1:]))) + fp_pretrain_vec.close() + + logger.info("Reading train entities...") + fp_entity_vec_train = open(train_entities, "r", encoding="utf-8") + for line in fp_entity_vec_train: + linesplit = line.split() + entity_embedding_dict[linesplit[0]] = np.asarray( + list(map(float, linesplit[1:])) + ) + fp_entity_vec_train.close() + + logger.info("Reading valid entities...") + fp_entity_vec_valid = open(valid_entities, "r", encoding="utf-8") + for line in fp_entity_vec_valid: + linesplit = line.split() + entity_embedding_dict[linesplit[0]] = np.asarray( + list(map(float, linesplit[1:])) + ) + fp_entity_vec_valid.close() + + logger.info("Generating word and entity indexes...") + word_dict = {} + word_index = 1 + news_word_string_dict = {} + news_entity_string_dict = {} + entity2index = {} + entity_index = 1 + for doc_id in news_words: + news_word_string_dict[doc_id] = [0 for n in range(max_sentence)] + news_entity_string_dict[doc_id] = [0 for n in range(max_sentence)] + surfaceform_entityids = news_entities[doc_id] + for item in surfaceform_entityids: + if item[1] not in entity2index and item[1] in entity_embedding_dict: + entity2index[item[1]] = entity_index + entity_index = entity_index + 1 + for i in range(len(news_words[doc_id])): + if news_words[doc_id][i] in word_embedding_dict: + if news_words[doc_id][i] not in word_dict: + word_dict[news_words[doc_id][i]] = word_index + word_index = word_index + 1 + news_word_string_dict[doc_id][i] = word_dict[news_words[doc_id][i]] + else: + news_word_string_dict[doc_id][i] = word_dict[news_words[doc_id][i]] + for item in surfaceform_entityids: + for surface in item[0]: + for surface_word in surface.split(" "): + if news_words[doc_id][i] == surface_word.lower(): + if item[1] in entity_embedding_dict: + news_entity_string_dict[doc_id][i] = entity2index[ + item[1] + ] + if i == max_sentence - 1: + break + + logger.info("Generating word embeddings...") + word_embeddings = np.zeros([word_index, word_embedding_dim]) + for word in word_dict: + word_embeddings[word_dict[word]] = word_embedding_dict[word] + + logger.info("Generating entity embeddings...") + entity_embeddings = np.zeros([entity_index, word_embedding_dim]) + for entity 
in entity2index: + entity_embeddings[entity2index[entity]] = entity_embedding_dict[entity] + + news_feature_path = os.path.join(data_path, "doc_feature.txt") + logger.info(f"Saving word and entity features in {news_feature_path}") + fp_doc_string = open(news_feature_path, "w", encoding="utf-8") + for doc_id in news_word_string_dict: + fp_doc_string.write( + doc_id + + " " + + ",".join(list(map(str, news_word_string_dict[doc_id]))) + + " " + + ",".join(list(map(str, news_entity_string_dict[doc_id]))) + + "\n" + ) + + word_embeddings_path = os.path.join( + data_path, "word_embeddings_5w_" + str(word_embedding_dim) + ".npy" + ) + logger.info(f"Saving word embeddings in {word_embeddings_path}") + np.save(word_embeddings_path, word_embeddings) + + entity_embeddings_path = os.path.join( + data_path, "entity_embeddings_5w_" + str(word_embedding_dim) + ".npy" + ) + logger.info(f"Saving entity embeddings in {entity_embeddings_path}") + np.save(entity_embeddings_path, entity_embeddings) + + return news_feature_path, word_embeddings_path, entity_embeddings_path
+ + +
[docs]def load_glove_matrix(path_emb, word_dict, word_embedding_dim): + """Load the pretrained embedding matrix of the words in word_dict + + Args: + path_emb (string): Folder path of downloaded glove file + word_dict (dict): word dictionary + word_embedding_dim: dimension of word embedding vectors + + Returns: + numpy.ndarray, list: pretrained word embedding matrix, and the words found in the glove files + """ + + embedding_matrix = np.zeros((len(word_dict) + 1, word_embedding_dim)) + exist_word = [] + + with open(os.path.join(path_emb, f"glove.6B.{word_embedding_dim}d.txt"), "rb") as f: + for l in tqdm(f): # noqa: E741 ambiguous variable name 'l' + l = l.split() # noqa: E741 ambiguous variable name 'l' + word = l[0].decode() + if len(word) != 0: + if word in word_dict: + wordvec = [float(x) for x in l[1:]] + index = word_dict[word] + embedding_matrix[index] = np.array(wordvec) + exist_word.append(word) + + return embedding_matrix, exist_word
+ + +
[docs]def word_tokenize(sent): + """Tokenize a sentence + + Args: + sent: the sentence that needs to be tokenized + + Returns: + list: words in the sentence + """ + + # treat consecutive words or special punctuation as words + pat = re.compile(r"[\w]+|[.,!?;|]") + if isinstance(sent, str): + return pat.findall(sent.lower()) + else: + return []
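A sketch of how the pieces above fit together for the small MIND split; the destination paths are illustrative, and `behaviors.tsv` is the impressions file shipped inside the MIND zips.

```python
from recommenders.datasets.mind import (
    download_mind,
    extract_mind,
    get_train_input,
    read_clickhistory,
)

# Fetch and unpack the small split (extract_mind deletes the zips by default)
train_zip, valid_zip = download_mind(size="small", dest_path="mind_data")
train_path, valid_path = extract_mind(train_zip, valid_zip)

# Parse impressions and write a negative-sampled training file
sessions, history = read_clickhistory(train_path, "behaviors.tsv")
get_train_input(sessions, "train_mind.txt", npratio=4)
```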
\ No newline at end of file
diff --git a/_modules/recommenders/datasets/movielens.html b/_modules/recommenders/datasets/movielens.html new file mode 100644 index 0000000000..92d4fdb7d5 --- /dev/null +++ b/_modules/recommenders/datasets/movielens.html @@ -0,0 +1,1098 @@
recommenders.datasets.movielens — Recommenders documentation
Source code for recommenders.datasets.movielens

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import os
+import re
+import random
+import shutil
+import warnings
+import pandas as pd
+from typing import Optional
+from zipfile import ZipFile
+from recommenders.datasets.download_utils import maybe_download, download_path
+from recommenders.utils.notebook_utils import is_databricks
+from recommenders.utils.constants import (
+    DEFAULT_HEADER,
+    DEFAULT_ITEM_COL,
+    DEFAULT_USER_COL,
+    DEFAULT_RATING_COL,
+    DEFAULT_TIMESTAMP_COL,
+    DEFAULT_TITLE_COL,
+    DEFAULT_GENRE_COL,
+)
+
+try:
+    from pyspark.sql.types import (
+        StructType,
+        StructField,
+        StringType,
+        IntegerType,
+        FloatType,
+        LongType,
+    )
+except ImportError:
+    pass  # so the environment without spark doesn't break
+
+import pandera as pa
+import pandera.extensions as extensions
+from pandera import Field
+from pandera.typing import Series
+
+
+class _DataFormat:
+    def __init__(
+        self,
+        sep,
+        path,
+        has_header=False,
+        item_sep=None,
+        item_path=None,
+        item_has_header=False,
+    ):
+        """MovieLens data format container as a different size of MovieLens data file
+        has a different format
+
+        Args:
+            sep (str): Rating data delimiter
+            path (str): Rating data path within the original zip file
+            has_header (bool): Whether the rating data contains a header line or not
+            item_sep (str): Item data delimiter
+            item_path (str): Item data path within the original zip file
+            item_has_header (bool): Whether the item data contains a header line or not
+        """
+
+        # Rating file
+        self._sep = sep
+        self._path = path
+        self._has_header = has_header
+
+        # Item file
+        self._item_sep = item_sep
+        self._item_path = item_path
+        self._item_has_header = item_has_header
+
+    @property
+    def separator(self):
+        return self._sep
+
+    @property
+    def path(self):
+        return self._path
+
+    @property
+    def has_header(self):
+        return self._has_header
+
+    @property
+    def item_separator(self):
+        return self._item_sep
+
+    @property
+    def item_path(self):
+        return self._item_path
+
+    @property
+    def item_has_header(self):
+        return self._item_has_header
+
+
+# 10m and 20m data do not have user data
+DATA_FORMAT = {
+    "100k": _DataFormat("\t", "ml-100k/u.data", False, "|", "ml-100k/u.item", False),
+    "1m": _DataFormat(
+        "::", "ml-1m/ratings.dat", False, "::", "ml-1m/movies.dat", False
+    ),
+    "10m": _DataFormat(
+        "::", "ml-10M100K/ratings.dat", False, "::", "ml-10M100K/movies.dat", False
+    ),
+    "20m": _DataFormat(",", "ml-20m/ratings.csv", True, ",", "ml-20m/movies.csv", True),
+}
+
+# Fake data for testing only
+MOCK_DATA_FORMAT = {
+    "mock100": {"size": 100, "seed": 6},
+}
+
+# 100K data genres index to string mapper. For 1m, 10m, and 20m, the genres labels are already in the dataset.
+GENRES = (
+    "unknown",
+    "Action",
+    "Adventure",
+    "Animation",
+    "Children's",
+    "Comedy",
+    "Crime",
+    "Documentary",
+    "Drama",
+    "Fantasy",
+    "Film-Noir",
+    "Horror",
+    "Musical",
+    "Mystery",
+    "Romance",
+    "Sci-Fi",
+    "Thriller",
+    "War",
+    "Western",
+)
+
+
+# Warning and error messages
+WARNING_MOVIE_LENS_HEADER = """MovieLens rating dataset has four columns
+    (user id, movie id, rating, and timestamp), but more than four column names are provided.
+    Will only use the first four column names."""
+WARNING_HAVE_SCHEMA_AND_HEADER = """Both schema and header are provided.
+    The header argument will be ignored."""
+ERROR_MOVIE_LENS_SIZE = (
+    "Invalid data size. Should be one of {100k, 1m, 10m, or 20m, or mock100}"
+)
+ERROR_HEADER = "Header error. At least user and movie column names should be provided"
+
+
+
[docs]def load_pandas_df( + size="100k", + header=None, + local_cache_path=None, + title_col=None, + genres_col=None, + year_col=None, +): + """Loads the MovieLens dataset as pd.DataFrame. + + Download the dataset from https://files.grouplens.org/datasets/movielens, unzip, and load. + To load movie information only, you can use load_item_df function. + + Args: + size (str): Size of the data to load. One of ("100k", "1m", "10m", "20m", "mock100"). + header (list or tuple or None): Rating dataset header. + If `size` is set to any of 'MOCK_DATA_FORMAT', this parameter is ignored and data is rendered using the 'DEFAULT_HEADER' instead. + local_cache_path (str): Path (directory or a zip file) to cache the downloaded zip file. + If None, all the intermediate files will be stored in a temporary directory and removed after use. + If `size` is set to any of 'MOCK_DATA_FORMAT', this parameter is ignored. + title_col (str): Movie title column name. If None, the column will not be loaded. + genres_col (str): Genres column name. Genres are '|' separated string. + If None, the column will not be loaded. + year_col (str): Movie release year column name. If None, the column will not be loaded. + If `size` is set to any of 'MOCK_DATA_FORMAT', this parameter is ignored. + + Returns: + pandas.DataFrame: Movie rating dataset. + + + **Examples** + + .. code-block:: python + + # To load just user-id, item-id, and ratings from MovieLens-1M dataset, + df = load_pandas_df('1m', ('UserId', 'ItemId', 'Rating')) + + # To load rating's timestamp together, + df = load_pandas_df('1m', ('UserId', 'ItemId', 'Rating', 'Timestamp')) + + # To load movie's title, genres, and released year info along with the ratings data, + df = load_pandas_df('1m', ('UserId', 'ItemId', 'Rating', 'Timestamp'), + title_col='Title', + genres_col='Genres', + year_col='Year' + ) + """ + size = size.lower() + if size not in DATA_FORMAT and size not in MOCK_DATA_FORMAT: + raise ValueError(f"Size: {size}. " + ERROR_MOVIE_LENS_SIZE) + + if header is None: + header = DEFAULT_HEADER + elif len(header) < 2: + raise ValueError(ERROR_HEADER) + elif len(header) > 4: + warnings.warn(WARNING_MOVIE_LENS_HEADER) + header = header[:4] + + if size in MOCK_DATA_FORMAT: + # generate fake data + return MockMovielensSchema.get_df( + keep_first_n_cols=len(header), + keep_title_col=(title_col is not None), + keep_genre_col=(genres_col is not None), + **MOCK_DATA_FORMAT[ + size + ], # supply the rest of the kwarg with the dictionary + ) + + movie_col = header[1] + + with download_path(local_cache_path) as path: + filepath = os.path.join(path, "ml-{}.zip".format(size)) + datapath, item_datapath = _maybe_download_and_extract(size, filepath) + + # Load movie features such as title, genres, and release year + item_df = _load_item_df( + size, item_datapath, movie_col, title_col, genres_col, year_col + ) + + # Load rating data + df = pd.read_csv( + datapath, + sep=DATA_FORMAT[size].separator, + engine="python", + names=header, + usecols=[*range(len(header))], + header=0 if DATA_FORMAT[size].has_header else None, + ) + + # Convert 'rating' type to float + if len(header) > 2: + df[header[2]] = df[header[2]].astype(float) + + # Merge rating df w/ item_df + if item_df is not None: + df = df.merge(item_df, on=header[1]) + + return df
+ + +
[docs]def load_item_df( + size="100k", + local_cache_path=None, + movie_col=DEFAULT_ITEM_COL, + title_col=None, + genres_col=None, + year_col=None, +): + """Loads Movie info. + + Args: + size (str): Size of the data to load. One of ("100k", "1m", "10m", "20m"). + local_cache_path (str): Path (directory or a zip file) to cache the downloaded zip file. + If None, all the intermediate files will be stored in a temporary directory and removed after use. + movie_col (str): Movie id column name. + title_col (str): Movie title column name. If None, the column will not be loaded. + genres_col (str): Genres column name. Genres are '|' separated string. + If None, the column will not be loaded. + year_col (str): Movie release year column name. If None, the column will not be loaded. + + Returns: + pandas.DataFrame: Movie information data, such as title, genres, and release year. + """ + size = size.lower() + if size not in DATA_FORMAT: + raise ValueError(f"Size: {size}. " + ERROR_MOVIE_LENS_SIZE) + + with download_path(local_cache_path) as path: + filepath = os.path.join(path, "ml-{}.zip".format(size)) + _, item_datapath = _maybe_download_and_extract(size, filepath) + item_df = _load_item_df( + size, item_datapath, movie_col, title_col, genres_col, year_col + ) + + return item_df
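Typical usage (note this downloads the ml-100k archive on first call); the movie id column defaults to the library's `itemID` constant:

```python
item_df = load_item_df("100k", title_col="Title", genres_col="Genres", year_col="Year")
print(item_df.columns.tolist())  # ['itemID', 'Title', 'Genres', 'Year']
```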
+ + +def _load_item_df(size, item_datapath, movie_col, title_col, genres_col, year_col): + """Loads Movie info""" + if title_col is None and genres_col is None and year_col is None: + return None + + item_header = [movie_col] + usecols = [0] + + # Year is parsed from title + if title_col is not None or year_col is not None: + item_header.append("title_year") + usecols.append(1) + + genres_header_100k = None + if genres_col is not None: + # 100k data's movie genres are encoded as a binary array (the last 19 fields) + # For details, see https://files.grouplens.org/datasets/movielens/ml-100k-README.txt + if size == "100k": + genres_header_100k = [*(str(i) for i in range(19))] + item_header.extend(genres_header_100k) + usecols.extend([*range(5, 24)]) # genres columns + else: + item_header.append(genres_col) + usecols.append(2) # genres column + + item_df = pd.read_csv( + item_datapath, + sep=DATA_FORMAT[size].item_separator, + engine="python", + names=item_header, + usecols=usecols, + header=0 if DATA_FORMAT[size].item_has_header else None, + encoding="ISO-8859-1", + ) + + # Convert 100k data's format: '0|0|1|...' to 'Action|Romance|..." + if genres_header_100k is not None: + item_df[genres_col] = item_df[genres_header_100k].values.tolist() + item_df[genres_col] = item_df[genres_col].map( + lambda l: "|".join([GENRES[i] for i, v in enumerate(l) if v == 1]) + ) + + item_df.drop(genres_header_100k, axis=1, inplace=True) + + # Parse year from movie title. Note, MovieLens title format is "title (year)" + # Note, there are very few records that are missing the year info. + if year_col is not None: + + def parse_year(t): + parsed = re.split("[()]", t) + if len(parsed) > 2 and parsed[-2].isdecimal(): + return parsed[-2] + else: + return None + + item_df[year_col] = item_df["title_year"].map(parse_year) + if title_col is None: + item_df.drop("title_year", axis=1, inplace=True) + + if title_col is not None: + item_df.rename(columns={"title_year": title_col}, inplace=True) + + return item_df + + +
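To illustrate the 100k genre conversion performed above: the 19 binary flags of an item row map onto the `GENRES` tuple by position and are joined with `|`.

```python
flags = [0] * 19
flags[1] = 1   # Action
flags[14] = 1  # Romance
print("|".join(GENRES[i] for i, v in enumerate(flags) if v == 1))  # Action|Romance
```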
[docs]def load_spark_df( + spark, + size="100k", + header=None, + schema=None, + local_cache_path=None, + dbutils=None, + title_col=None, + genres_col=None, + year_col=None, +): + """Loads the MovieLens dataset as `pyspark.sql.DataFrame`. + + Download the dataset from https://files.grouplens.org/datasets/movielens, unzip, and load as `pyspark.sql.DataFrame`. + + To load movie information only, you can use `load_item_df` function. + + Args: + spark (pyspark.SparkSession): Spark session. + size (str): Size of the data to load. One of ("100k", "1m", "10m", "20m", "mock100"). + header (list or tuple): Rating dataset header. + If `schema` is provided or `size` is set to any of 'MOCK_DATA_FORMAT', this argument is ignored. + schema (pyspark.StructType): Dataset schema. + If `size` is set to any of 'MOCK_DATA_FORMAT', data is rendered in the 'MockMovielensSchema' instead. + local_cache_path (str): Path (directory or a zip file) to cache the downloaded zip file. + If None, all the intermediate files will be stored in a temporary directory and removed after use. + dbutils (Databricks.dbutils): Databricks utility object + If `size` is set to any of 'MOCK_DATA_FORMAT', this parameter is ignored. + title_col (str): Title column name. If None, the column will not be loaded. + genres_col (str): Genres column name. Genres are '|' separated string. + If None, the column will not be loaded. + year_col (str): Movie release year column name. If None, the column will not be loaded. + If `size` is set to any of 'MOCK_DATA_FORMAT', this parameter is ignored. + + Returns: + pyspark.sql.DataFrame: Movie rating dataset. + + **Examples** + + .. code-block:: python + + # To load just user-id, item-id, and ratings from MovieLens-1M dataset: + spark_df = load_spark_df(spark, '1m', ('UserId', 'ItemId', 'Rating')) + + # The schema can be defined as well: + schema = StructType([ + StructField(DEFAULT_USER_COL, IntegerType()), + StructField(DEFAULT_ITEM_COL, IntegerType()), + StructField(DEFAULT_RATING_COL, FloatType()), + StructField(DEFAULT_TIMESTAMP_COL, LongType()), + ]) + spark_df = load_spark_df(spark, '1m', ('UserId', 'ItemId', 'Rating'), schema=schema) + + # To load rating's timestamp together: + spark_df = load_spark_df(spark, '1m', ('UserId', 'ItemId', 'Rating', 'Timestamp')) + + # To load movie's title, genres, and released year info along with the ratings data: + spark_df = load_spark_df(spark, '1m', ('UserId', 'ItemId', 'Rating', 'Timestamp'), + title_col='Title', + genres_col='Genres', + year_col='Year' + ) + + # On DataBricks, pass the dbutils argument as follows: + spark_df = load_spark_df(spark, dbutils=dbutils) + """ + size = size.lower() + if size not in DATA_FORMAT and size not in MOCK_DATA_FORMAT: + raise ValueError(f"Size: {size}. " + ERROR_MOVIE_LENS_SIZE) + + if size in MOCK_DATA_FORMAT: + # generate fake data + return MockMovielensSchema.get_spark_df( + spark, + keep_title_col=(title_col is not None), + keep_genre_col=(genres_col is not None), + **MOCK_DATA_FORMAT[ + size + ], # supply the rest of the kwarg with the dictionary + ) + + schema = _get_schema(header, schema) + if len(schema) < 2: + raise ValueError(ERROR_HEADER) + + movie_col = schema[1].name + + with download_path(local_cache_path) as path: + filepath = os.path.join(path, "ml-{}.zip".format(size)) + datapath, item_datapath = _maybe_download_and_extract(size, filepath) + spark_datapath = "file:///" + datapath # shorten form of file://localhost/ + + # Load movie features such as title, genres, and release year. 
+ # Since the file size is small, we directly load as pd.DataFrame from the driver node + # and then convert into pyspark.sql.DataFrame + item_pd_df = _load_item_df( + size, item_datapath, movie_col, title_col, genres_col, year_col + ) + item_df = spark.createDataFrame(item_pd_df) if item_pd_df is not None else None + + if is_databricks(): + if dbutils is None: + raise ValueError( + """ + To use this function on Databricks, the dbutils object must be passed as an argument. + E.g. load_spark_df(spark, dbutils=dbutils) + """ + ) + + # Move rating file to DBFS in order to load into pyspark.sql.DataFrame + dbfs_datapath = "dbfs:/tmp/" + datapath + dbutils.fs.mv(spark_datapath, dbfs_datapath) + spark_datapath = dbfs_datapath + + # pyspark's read csv currently doesn't support multi-character delimiter, thus we manually handle that + separator = DATA_FORMAT[size].separator + if len(separator) > 1: + raw_data = spark.sparkContext.textFile(spark_datapath) + data_rdd = raw_data.map(lambda l: l.split(separator)).map( + lambda c: [int(c[0]), int(c[1]), float(c[2]), int(c[3])][: len(schema)] + ) + df = spark.createDataFrame(data_rdd, schema) + else: + df = spark.read.csv( + spark_datapath, + schema=schema, + sep=separator, + header=DATA_FORMAT[size].has_header, + ) + + # Merge rating df w/ item_df + if item_df is not None: + df = df.join(item_df, movie_col, "left") + + # Cache and force trigger action since data-file might be removed. + df.cache() + df.count() + + return df
+ + +def _get_schema(header, schema): + if schema is None or len(schema) == 0: + # Use header to generate schema + if header is None or len(header) == 0: + header = DEFAULT_HEADER + elif len(header) > 4: + warnings.warn(WARNING_MOVIE_LENS_HEADER) + header = header[:4] + + schema = StructType() + try: + ( + schema.add(StructField(header[0], IntegerType())) + .add(StructField(header[1], IntegerType())) + .add(StructField(header[2], FloatType())) + .add(StructField(header[3], LongType())) + ) + except IndexError: + pass + else: + if header is not None: + warnings.warn(WARNING_HAVE_SCHEMA_AND_HEADER) + + if len(schema) > 4: + warnings.warn(WARNING_MOVIE_LENS_HEADER) + schema = schema[:4] + + return schema + + +def _maybe_download_and_extract(size, dest_path): + """Downloads and extracts MovieLens rating and item datafiles if they don’t already exist""" + dirs, _ = os.path.split(dest_path) + if not os.path.exists(dirs): + os.makedirs(dirs) + + _, rating_filename = os.path.split(DATA_FORMAT[size].path) + rating_path = os.path.join(dirs, rating_filename) + _, item_filename = os.path.split(DATA_FORMAT[size].item_path) + item_path = os.path.join(dirs, item_filename) + + if not os.path.exists(rating_path) or not os.path.exists(item_path): + download_movielens(size, dest_path) + extract_movielens(size, rating_path, item_path, dest_path) + + return rating_path, item_path + + +
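A sketch of the header-to-schema fallback in `_get_schema` (requires PySpark; the printed type reprs may vary by version). With three column names, the `IndexError` on the missing timestamp field is swallowed on purpose:

```python
schema = _get_schema(header=("UserId", "ItemId", "Rating"), schema=None)
print([(f.name, f.dataType) for f in schema])
# [('UserId', IntegerType()), ('ItemId', IntegerType()), ('Rating', FloatType())]
```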
[docs]def download_movielens(size, dest_path): + """Downloads MovieLens datafile. + + Args: + size (str): Size of the data to load. One of ("100k", "1m", "10m", "20m"). + dest_path (str): File path for the downloaded file + """ + if size not in DATA_FORMAT: + raise ValueError(f"Size: {size}. " + ERROR_MOVIE_LENS_SIZE) + + url = "https://files.grouplens.org/datasets/movielens/ml-" + size + ".zip" + dirs, file = os.path.split(dest_path) + maybe_download(url, file, work_directory=dirs)
+ + +
[docs]def extract_movielens(size, rating_path, item_path, zip_path): + """Extract MovieLens rating and item datafiles from the MovieLens raw zip file. + + To extract all files instead of just rating and item datafiles, + use ZipFile's extractall(path) instead. + + Args: + size (str): Size of the data to load. One of ("100k", "1m", "10m", "20m"). + rating_path (str): Destination path for rating datafile + item_path (str): Destination path for item datafile + zip_path (str): zipfile path + """ + with ZipFile(zip_path, "r") as z: + with z.open(DATA_FORMAT[size].path) as zf, open(rating_path, "wb") as f: + shutil.copyfileobj(zf, f) + with z.open(DATA_FORMAT[size].item_path) as zf, open(item_path, "wb") as f: + shutil.copyfileobj(zf, f)
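Downloading and extracting the 100k files by hand, mirroring what `_maybe_download_and_extract` does internally (network access required; all paths are illustrative):

```python
download_movielens("100k", "./ml-100k.zip")
extract_movielens("100k", "./u.data", "./u.item", "./ml-100k.zip")
```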
+ + +# For more information on data synthesis, see https://pandera.readthedocs.io/en/latest/data_synthesis_strategies.html +@extensions.register_check_method(statistics=["columns"], supported_types=pd.DataFrame) +def unique_columns(df, *, columns): + return not df[columns].duplicated().any() + + +
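The registered predicate in isolation, applied by hand to a small frame:

```python
df = pd.DataFrame({"userID": [1, 1, 2], "itemID": [10, 10, 10]})
# The (1, 10) pair repeats, so the uniqueness check fails on this frame.
print(not df[["userID", "itemID"]].duplicated().any())  # False
```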
[docs]class MockMovielensSchema(pa.SchemaModel): + """ + Mock dataset schema to generate fake data for testing purpose. + This schema is configured to mimic the Movielens dataset + + https://files.grouplens.org/datasets/movielens/ml-100k/ + + Dataset schema and generation is configured using pandera. + Please see https://pandera.readthedocs.io/en/latest/schema_models.html + for more information. + """ + + # Some notebooks will do a cross join with userID and itemID, + # a sparse range for these IDs can slow down the notebook tests + userID: Series[int] = Field( + in_range={"min_value": 1, "max_value": 50}, alias=DEFAULT_USER_COL + ) + itemID: Series[int] = Field( + in_range={"min_value": 1, "max_value": 50}, alias=DEFAULT_ITEM_COL + ) + rating: Series[float] = Field( + in_range={"min_value": 1, "max_value": 5}, alias=DEFAULT_RATING_COL + ) + timestamp: Series[int] = Field( + in_range={"min_value": 0, "max_value": 1e9}, alias=DEFAULT_TIMESTAMP_COL + ) + title: Series[str] = Field(eq="foo", alias=DEFAULT_TITLE_COL) + genre: Series[str] = Field(eq="genreA|0", alias=DEFAULT_GENRE_COL) + +
[docs] @classmethod + def get_df( + cls, + size: int = 3, + seed: int = 100, + keep_first_n_cols: Optional[int] = None, + keep_title_col: bool = False, + keep_genre_col: bool = False, + ) -> pd.DataFrame: + """Return fake movielens dataset as a Pandas Dataframe with specified rows. + + Args: + size (int): number of rows to generate + seed (int, optional): seed for the pseudo-random number generation. Defaults to 100. + keep_first_n_cols (int, optional): keep only the first n default movielens columns. + keep_title_col (bool): remove the title column if False. Defaults to False. + keep_genre_col (bool): remove the genre column if False. Defaults to False. + + Returns: + pandas.DataFrame: a mock dataset + """ + schema = cls.to_schema() + if keep_first_n_cols is not None: + if keep_first_n_cols < 1 or keep_first_n_cols > len(DEFAULT_HEADER): + raise ValueError( + f"Invalid value for 'keep_first_n_cols': {keep_first_n_cols}. Valid range: [1-{len(DEFAULT_HEADER)}]" + ) + schema = schema.remove_columns(DEFAULT_HEADER[keep_first_n_cols:]) + if not keep_title_col: + schema = schema.remove_columns([DEFAULT_TITLE_COL]) + if not keep_genre_col: + schema = schema.remove_columns([DEFAULT_GENRE_COL]) + + random.seed(seed) + schema.checks = [pa.Check.unique_columns([DEFAULT_USER_COL, DEFAULT_ITEM_COL])] + return schema.example(size=size)
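Example call (pandera's data synthesis relies on the `hypothesis` package being installed); the printed names are the library's default column constants:

```python
mock_df = MockMovielensSchema.get_df(size=5, seed=42, keep_title_col=True, keep_genre_col=True)
print(mock_df.columns.tolist())
# ['userID', 'itemID', 'rating', 'timestamp', 'title', 'genre']
```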
+ +
[docs] @classmethod + def get_spark_df( + cls, + spark, + size: int = 3, + seed: int = 100, + keep_title_col: bool = False, + keep_genre_col: bool = False, + tmp_path: Optional[str] = None, + ): + """Return fake movielens dataset as a Spark Dataframe with specified rows + + Args: + spark (SparkSession): spark session to load the dataframe into + size (int): number of rows to generate + seed (int): seed for the pseudo-random number generation. Defaults to 100. + keep_title_col (bool): remove the title column if False. Defaults to False. + keep_genre_col (bool): remove the genre column if False. Defaults to False. + tmp_path (str, optional): path to store files for serialization purpose + when transferring data from python to java. + If None, a temporary path is used instead + + Returns: + pyspark.sql.DataFrame: a mock dataset + """ + pandas_df = cls.get_df( + size=size, seed=seed, keep_title_col=True, keep_genre_col=True + ) + + # generate temp folder + with download_path(tmp_path) as tmp_folder: + filepath = os.path.join(tmp_folder, f"mock_movielens_{size}.csv") + # serialize the pandas.df as a csv to avoid the expensive java <-> python communication + pandas_df.to_csv(filepath, header=False, index=False) + spark_df = spark.read.csv( + filepath, schema=cls._get_spark_deserialization_schema() + ) + # Cache and force trigger action since data-file might be removed. + spark_df.cache() + spark_df.count() + + if not keep_title_col: + spark_df = spark_df.drop(DEFAULT_TITLE_COL) + if not keep_genre_col: + spark_df = spark_df.drop(DEFAULT_GENRE_COL) + return spark_df
+ + @classmethod + def _get_spark_deserialization_schema(cls): + return StructType( + [ + StructField(DEFAULT_USER_COL, IntegerType()), + StructField(DEFAULT_ITEM_COL, IntegerType()), + StructField(DEFAULT_RATING_COL, FloatType()), + StructField(DEFAULT_TIMESTAMP_COL, StringType()), + StructField(DEFAULT_TITLE_COL, StringType()), + StructField(DEFAULT_GENRE_COL, StringType()), + ] + )
+ + \ No newline at end of file diff --git a/_modules/recommenders/datasets/pandas_df_utils.html b/_modules/recommenders/datasets/pandas_df_utils.html new file mode 100644 index 0000000000..a1b098c99d --- /dev/null +++ b/_modules/recommenders/datasets/pandas_df_utils.html @@ -0,0 +1,882 @@ + + + + + + + + + + + recommenders.datasets.pandas_df_utils — Recommenders documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for recommenders.datasets.pandas_df_utils

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import logging
+import pandas as pd
+import numpy as np
+from functools import lru_cache, wraps
+
+from recommenders.utils.constants import (
+    DEFAULT_USER_COL,
+    DEFAULT_ITEM_COL,
+    DEFAULT_RATING_COL,
+    DEFAULT_LABEL_COL,
+)
+
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]def user_item_pairs( + user_df, + item_df, + user_col=DEFAULT_USER_COL, + item_col=DEFAULT_ITEM_COL, + user_item_filter_df=None, + shuffle=True, + seed=None, +): + """Get all pairs of users and items data. + + Args: + user_df (pandas.DataFrame): User data containing unique user ids and maybe their features. + item_df (pandas.DataFrame): Item data containing unique item ids and maybe their features. + user_col (str): User id column name. + item_col (str): Item id column name. + user_item_filter_df (pd.DataFrame): User-item pairs to be used as a filter. + shuffle (bool): If True, shuffles the result. + seed (int): Random seed for shuffle + + Returns: + pandas.DataFrame: All pairs of user-item from user_df and item_df, excepting the pairs in user_item_filter_df. + """ + + # Get all user-item pairs + user_df["key"] = 1 + item_df["key"] = 1 + users_items = user_df.merge(item_df, on="key") + + user_df.drop("key", axis=1, inplace=True) + item_df.drop("key", axis=1, inplace=True) + users_items.drop("key", axis=1, inplace=True) + + # Filter + if user_item_filter_df is not None: + users_items = filter_by(users_items, user_item_filter_df, [user_col, item_col]) + + if shuffle: + users_items = users_items.sample(frac=1, random_state=seed).reset_index( + drop=True + ) + + return users_items
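A small worked example of the cross join and filtering:

```python
users = pd.DataFrame({"userID": [1, 2]})
items = pd.DataFrame({"itemID": [10, 20, 30]})
seen = pd.DataFrame({"userID": [1, 2], "itemID": [10, 20]})
# 2 x 3 cross join minus the 2 observed pairs leaves 4 candidate pairs.
candidates = user_item_pairs(users, items, user_item_filter_df=seen, shuffle=False)
print(len(candidates))  # 4
```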
+ + +
[docs]def filter_by(df, filter_by_df, filter_by_cols): + """From the input DataFrame `df`, remove the records whose target column `filter_by_cols` values exist in + the filter-by DataFrame `filter_by_df`. + + Args: + df (pandas.DataFrame): Source dataframe. + filter_by_df (pandas.DataFrame): Filter dataframe. + filter_by_cols (iterable of str): Filter columns. + + Returns: + pandas.DataFrame: Dataframe filtered by `filter_by_df` on `filter_by_cols`. + + """ + + return df.loc[ + ~df.set_index(filter_by_cols).index.isin( + filter_by_df.set_index(filter_by_cols).index + ) + ]
+ + +
[docs]class LibffmConverter: + """Converts an input dataframe to another dataframe in libffm format. A text file of the converted + Dataframe is optionally generated. + + Note: + + The input dataframe is expected to represent the feature data in the following schema: + + .. code-block:: python + + |field-1|field-2|...|field-n|rating| + |feature-1-1|feature-2-1|...|feature-n-1|1| + |feature-1-2|feature-2-2|...|feature-n-2|0| + ... + |feature-1-i|feature-2-j|...|feature-n-k|0| + + Where + 1. each `field-*` is the column name of the dataframe (column of label/rating is excluded), and + 2. `feature-*-*` can be either a string or a numerical value, representing the categorical variable or + actual numerical variable of the feature value in the field, respectively. + 3. If there are ordinal variables represented in int types, users should make sure these columns + are properly converted to string type. + + The above data will be converted to the libffm format by following the convention as explained in + `this paper <https://www.csie.ntu.edu.tw/~r01922136/slides/ffm.pdf>`_. + + i.e. `<field_index>:<field_feature_index>:1` or `<field_index>:<field_feature_index>:<field_feature_value>`, + depending on the data type of the features in the original dataframe. + + Args: + filepath (str): path to save the converted data. + + Attributes: + field_count (int): count of field in the libffm format data + feature_count (int): count of feature in the libffm format data + filepath (str or None): file path where the output is stored - it can be None or a string + + Examples: + >>> import pandas as pd + >>> df_feature = pd.DataFrame({ + 'rating': [1, 0, 0, 1, 1], + 'field1': ['xxx1', 'xxx2', 'xxx4', 'xxx4', 'xxx4'], + 'field2': [3, 4, 5, 6, 7], + 'field3': [1.0, 2.0, 3.0, 4.0, 5.0], + 'field4': ['1', '2', '3', '4', '5'] + }) + >>> converter = LibffmConverter().fit(df_feature, col_rating='rating') + >>> df_out = converter.transform(df_feature) + >>> df_out + rating field1 field2 field3 field4 + 0 1 1:1:1 2:4:3 3:5:1.0 4:6:1 + 1 0 1:2:1 2:4:4 3:5:2.0 4:7:1 + 2 0 1:3:1 2:4:5 3:5:3.0 4:8:1 + 3 1 1:3:1 2:4:6 3:5:4.0 4:9:1 + 4 1 1:3:1 2:4:7 3:5:5.0 4:10:1 + """ + + def __init__(self, filepath=None): + self.filepath = filepath + self.col_rating = None + self.field_names = None + self.field_count = None + self.feature_count = None + +
[docs] def fit(self, df, col_rating=DEFAULT_RATING_COL): + """Fit the dataframe for libffm format. + This method does nothing but check the validity of the input columns + + Args: + df (pandas.DataFrame): input Pandas dataframe. + col_rating (str): rating of the data. + + Return: + object: the instance of the converter + """ + + # Check column types. + types = df.dtypes + if not all( + [ + x == object or np.issubdtype(x, np.integer) or np.issubdtype(x, np.floating) + for x in types + ] + ): + raise TypeError("Input columns should be only object and/or numeric types.") + + if col_rating not in df.columns: + raise TypeError( + "Column of {} is not in input dataframe columns".format(col_rating) + ) + + self.col_rating = col_rating + self.field_names = list(df.drop(col_rating, axis=1).columns) + + return self
+ +
[docs] def transform(self, df): + """Transform an input dataset with the same schema (column names and dtypes) to libffm format + by using the fitted converter. + + Args: + df (pandas.DataFrame): input Pandas dataframe. + + Return: + pandas.DataFrame: Output libffm format dataframe. + """ + if self.col_rating not in df.columns: + raise ValueError( + "Input dataset does not contain the label column {} in the fitting dataset".format( + self.col_rating + ) + ) + + if not all([x in df.columns for x in self.field_names]): + raise ValueError( + "Not all columns in the input dataset appear in the fitting dataset" + ) + + # Encode field-feature. + idx = 1 + self.field_feature_dict = {} + for field in self.field_names: + for feature in df[field].values: + # Check whether (field, feature) tuple exists in the dict or not. + # If not, put them into the key-values of the dict and count the index. + if (field, feature) not in self.field_feature_dict: + self.field_feature_dict[(field, feature)] = idx + if df[field].dtype == object: + idx += 1 + if df[field].dtype != object: + idx += 1 + + self.field_count = len(self.field_names) + self.feature_count = idx - 1 + + def _convert(field, feature, field_index, field_feature_index_dict): + field_feature_index = field_feature_index_dict[(field, feature)] + if isinstance(feature, str): + feature = 1 + return "{}:{}:{}".format(field_index, field_feature_index, feature) + + for col_index, col in enumerate(self.field_names): + df[col] = df[col].apply( + lambda x: _convert(col, x, col_index + 1, self.field_feature_dict) + ) + + # Move rating column to the first. + column_names = self.field_names[:] + column_names.insert(0, self.col_rating) + df = df[column_names] + + if self.filepath is not None: + np.savetxt(self.filepath, df.values, delimiter=" ", fmt="%s") + + return df
+ +
[docs] def fit_transform(self, df, col_rating=DEFAULT_RATING_COL): + """Do fit and transform in a row + + Args: + df (pandas.DataFrame): input Pandas dataframe. + col_rating (str): rating of the data. + + Return: + pandas.DataFrame: Output libffm format dataframe. + """ + return self.fit(df, col_rating=col_rating).transform(df)
+ +
[docs] def get_params(self): + """Get parameters (attributes) of the libffm converter + + Return: + dict: A dictionary that contains parameters field count, feature count, and file path. + """ + return { + "field count": self.field_count, + "feature count": self.feature_count, + "file path": self.filepath, + }
+ + +
[docs]def negative_feedback_sampler( + df, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_label=DEFAULT_LABEL_COL, + col_feedback="feedback", + ratio_neg_per_user=1, + pos_value=1, + neg_value=0, + seed=42, +): + """Utility function to sample negative feedback from user-item interaction dataset. + This negative sampling function will take the user-item interaction data to create + binarized feedback, i.e., 1 and 0 indicate positive and negative feedback, + respectively. + + Negative sampling is used in the literature frequently to generate negative samples + from a user-item interaction data. + + See for example the `neural collaborative filtering paper <https://www.comp.nus.edu.sg/~xiangnan/papers/ncf.pdf>`_. + + Args: + df (pandas.DataFrame): input data that contains user-item tuples. + col_user (str): user id column name. + col_item (str): item id column name. + col_label (str): label column name in df. + col_feedback (str): feedback column name in the returned data frame; it is used for the generated column + of positive and negative feedback. + ratio_neg_per_user (int): ratio of negative feedback w.r.t. the number of positive feedback for each user. + If the samples exceed the number of total possible negative feedback samples, it will be reduced to the + number of all the possible samples. + pos_value (float): value of positive feedback. + neg_value (float): value of negative feedback. + seed (int): seed for the random state of the sampling function. + + Returns: + pandas.DataFrame: Data with negative feedback. + + Examples: + >>> import pandas as pd + >>> df = pd.DataFrame({ + 'userID': [1, 2, 3], + 'itemID': [1, 2, 3], + 'rating': [5, 5, 5] + }) + >>> df_neg_sampled = negative_feedback_sampler( + df, col_user='userID', col_item='itemID', ratio_neg_per_user=1 + ) + >>> df_neg_sampled + userID itemID feedback + 1 1 1 + 1 2 0 + 2 2 1 + 2 1 0 + 3 3 1 + 3 1 0 + """ + # Get all of the users and items. + items = df[col_item].unique() + rng = np.random.default_rng(seed=seed) + + def sample_items(user_df): + # Sample negative items for the data frame restricted to a specific user + n_u = len(user_df) + neg_sample_size = max(round(n_u * ratio_neg_per_user), 1) + # Draw (n_u + neg_sample_size) items and keep neg_sample_size of these + # that are not already in user_df. This requires a set difference from items_sample + # instead of items, which is more efficient when len(items) is large. + sample_size = min(n_u + neg_sample_size, len(items)) + items_sample = rng.choice(items, sample_size, replace=False) + new_items = np.setdiff1d(items_sample, user_df[col_item])[:neg_sample_size] + new_df = pd.DataFrame( + data={ + col_user: user_df.name, + col_item: new_items, + col_label: neg_value, + } + ) + return pd.concat([user_df, new_df], ignore_index=True) + + res_df = df.copy() + res_df[col_label] = pos_value + return ( + res_df.groupby(col_user) + .apply(sample_items) + .reset_index(drop=True) + .rename(columns={col_label: col_feedback}) + )
+ + +
[docs]def has_columns(df, columns): + """Check if DataFrame has necessary columns + + Args: + df (pandas.DataFrame): DataFrame + columns (iterable(str)): columns to check for + + Returns: + bool: True if DataFrame has specified columns. + """ + if not isinstance(columns, set): + columns = set(columns) + return columns.issubset(df.columns)
+ + +
[docs]def has_same_base_dtype(df_1, df_2, columns=None): + """Check if specified columns have the same base dtypes across both DataFrames + + Args: + df_1 (pandas.DataFrame): first DataFrame + df_2 (pandas.DataFrame): second DataFrame + columns (list(str)): columns to check, None checks all columns + + Returns: + bool: True if DataFrames columns have the same base dtypes. + """ + + if columns is None: + if any(set(df_1.columns).symmetric_difference(set(df_2.columns))): + logger.error( + "Cannot test all columns because they are not all shared across DataFrames" + ) + return False + columns = df_1.columns + + if not ( + has_columns(df=df_1, columns=columns) and has_columns(df=df_2, columns=columns) + ): + return False + + result = True + for column in columns: + if df_1[column].dtype.type.__base__ != df_2[column].dtype.type.__base__: + logger.error("Columns {} do not have the same base datatype".format(column)) + result = False + + return result
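Example: two frames whose dtypes differ in width but share the integer base type:

```python
df_1 = pd.DataFrame({"x": np.array([1, 2], dtype=np.int32)})
df_2 = pd.DataFrame({"x": np.array([1, 2], dtype=np.int64)})
print(has_same_base_dtype(df_1, df_2))  # True: int32 and int64 share the same base
```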
+ + +
[docs]class PandasHash: + """Wrapper class to allow pandas objects (DataFrames or Series) to be hashable""" + + # reserve space just for a single pandas object + __slots__ = "pandas_object" + + def __init__(self, pandas_object): + """Initialize class + + Args: + pandas_object (pandas.DataFrame|pandas.Series): pandas object + """ + + if not isinstance(pandas_object, (pd.DataFrame, pd.Series)): + raise TypeError("Can only wrap pandas DataFrame or Series objects") + self.pandas_object = pandas_object + + def __eq__(self, other): + """Overwrite equality comparison + + Args: + other (pandas.DataFrame|pandas.Series): pandas object to compare + + Returns: + bool: whether other object is the same as this one + """ + + return hash(self) == hash(other) + + def __hash__(self): + """Overwrite hash operator for use with pandas objects + + Returns: + int: hashed value of object + """ + + hashable = tuple(self.pandas_object.values.tobytes()) + if isinstance(self.pandas_object, pd.DataFrame): + hashable += tuple(self.pandas_object.columns) + else: + hashable += tuple(self.pandas_object.name) + return hash(hashable)
+ + +
[docs]def lru_cache_df(maxsize, typed=False): + """Least-recently-used cache decorator for pandas Dataframes. + + Decorator to wrap a function with a memoizing callable that saves up to the maxsize most recent calls. It can + save time when an expensive or I/O bound function is periodically called with the same arguments. + + Inspired by the `lru_cache function <https://docs.python.org/3/library/functools.html#functools.lru_cache>`_. + + Args: + maxsize (int|None): max size of cache, if set to None cache is boundless + typed (bool): arguments of different types are cached separately + """ + + def to_pandas_hash(val): + """Return PandasHash object if input is a DataFrame otherwise return input unchanged""" + return PandasHash(val) if isinstance(val, pd.DataFrame) else val + + def from_pandas_hash(val): + """Extract DataFrame if input is PandasHash object otherwise return input unchanged""" + return val.pandas_object if isinstance(val, PandasHash) else val + + def decorating_function(user_function): + @wraps(user_function) + def wrapper(*args, **kwargs): + # convert DataFrames in args and kwargs to PandasHash objects + args = tuple([to_pandas_hash(a) for a in args]) + kwargs = {k: to_pandas_hash(v) for k, v in kwargs.items()} + return cached_wrapper(*args, **kwargs) + + @lru_cache(maxsize=maxsize, typed=typed) + def cached_wrapper(*args, **kwargs): + # get DataFrames from PandasHash objects in args and kwargs + args = tuple([from_pandas_hash(a) for a in args]) + kwargs = {k: from_pandas_hash(v) for k, v in kwargs.items()} + return user_function(*args, **kwargs) + + # retain lru_cache attributes + wrapper.cache_info = cached_wrapper.cache_info + wrapper.cache_clear = cached_wrapper.cache_clear + + return wrapper + + return decorating_function
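A usage sketch of the decorator; the second call hits the cache because the copied frame hashes to the same bytes and column names:

```python
@lru_cache_df(maxsize=32)
def normalize(df):
    # stands in for an expensive transformation
    return (df - df.mean()) / df.std()

frame = pd.DataFrame({"x": [1.0, 2.0, 3.0]})
normalize(frame)         # computed once
normalize(frame.copy())  # same contents -> served from the cache
print(normalize.cache_info().hits)  # 1
```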
+
+ + \ No newline at end of file diff --git a/_modules/recommenders/datasets/python_splitters.html b/_modules/recommenders/datasets/python_splitters.html new file mode 100644 index 0000000000..a2aeaefe29 --- /dev/null +++ b/_modules/recommenders/datasets/python_splitters.html @@ -0,0 +1,667 @@ + + + + + + + + + + + recommenders.datasets.python_splitters — Recommenders documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for recommenders.datasets.python_splitters

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+import numpy as np
+import pandas as pd
+from sklearn.model_selection import train_test_split as sk_split
+
+from recommenders.utils.constants import (
+    DEFAULT_ITEM_COL,
+    DEFAULT_USER_COL,
+    DEFAULT_TIMESTAMP_COL,
+)
+from recommenders.datasets.split_utils import (
+    process_split_ratio,
+    min_rating_filter_pandas,
+    split_pandas_data_with_ratios,
+)
+
+
+
[docs]def python_random_split(data, ratio=0.75, seed=42): + """Pandas random splitter. + + The splitter randomly splits the input data. + + Args: + data (pandas.DataFrame): Pandas DataFrame to be split. + ratio (float or list): Ratio for splitting data. If it is a single float number + it splits data into two halves and the ratio argument indicates the ratio + of training data set; if it is a list of float numbers, the splitter splits + data into several portions corresponding to the split ratios. If a list is + provided and the ratios are not summed to 1, they will be normalized. + seed (int): Seed. + + Returns: + list: Splits of the input data as pandas.DataFrame. + """ + multi_split, ratio = process_split_ratio(ratio) + + if multi_split: + splits = split_pandas_data_with_ratios(data, ratio, shuffle=True, seed=seed) + splits_new = [x.drop("split_index", axis=1) for x in splits] + + return splits_new + else: + return sk_split(data, test_size=None, train_size=ratio, random_state=seed)
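Example of the single-ratio and multi-ratio behavior on a small frame:

```python
ratings = pd.DataFrame({"userID": range(8), "itemID": range(8), "rating": [3.0] * 8})
train, test = python_random_split(ratings, ratio=0.75, seed=123)
print(len(train), len(test))  # 6 2
splits = python_random_split(ratings, ratio=[0.5, 0.25, 0.25], seed=123)
print([len(s) for s in splits])  # [4, 2, 2]
```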
+ + +def _do_stratification( + data, + ratio=0.75, + min_rating=1, + filter_by="user", + is_random=True, + seed=42, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_timestamp=DEFAULT_TIMESTAMP_COL, +): + # A few preliminary checks. + if not (filter_by == "user" or filter_by == "item"): + raise ValueError("filter_by should be either 'user' or 'item'.") + + if min_rating < 1: + raise ValueError("min_rating should be integer and larger than or equal to 1.") + + if col_user not in data.columns: + raise ValueError("Schema of data not valid. Missing User Col") + + if col_item not in data.columns: + raise ValueError("Schema of data not valid. Missing Item Col") + + if not is_random: + if col_timestamp not in data.columns: + raise ValueError("Schema of data not valid. Missing Timestamp Col") + + multi_split, ratio = process_split_ratio(ratio) + + split_by_column = col_user if filter_by == "user" else col_item + + ratio = ratio if multi_split else [ratio, 1 - ratio] + + if min_rating > 1: + data = min_rating_filter_pandas( + data, + min_rating=min_rating, + filter_by=filter_by, + col_user=col_user, + col_item=col_item, + ) + + if is_random: + np.random.seed(seed) + data["random"] = np.random.rand(data.shape[0]) + order_by = "random" + else: + order_by = col_timestamp + + data = data.sort_values([split_by_column, order_by]) + + groups = data.groupby(split_by_column) + + data["count"] = groups[split_by_column].transform("count") + data["rank"] = groups.cumcount() + 1 + + if is_random: + data = data.drop("random", axis=1) + + splits = [] + prev_threshold = None + for threshold in np.cumsum(ratio): + condition = data["rank"] <= round(threshold * data["count"]) + if prev_threshold is not None: + condition &= data["rank"] > round(prev_threshold * data["count"]) + splits.append(data[condition].drop(["rank", "count"], axis=1)) + prev_threshold = threshold + + return splits + + +
[docs]def python_chrono_split( + data, + ratio=0.75, + min_rating=1, + filter_by="user", + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_timestamp=DEFAULT_TIMESTAMP_COL, +): + """Pandas chronological splitter. + + This function splits data in a chronological manner. That is, for each user / item, the + split function takes proportions of ratings which is specified by the split ratio(s). + The split is stratified. + + Args: + data (pandas.DataFrame): Pandas DataFrame to be split. + ratio (float or list): Ratio for splitting data. If it is a single float number + it splits data into two halves and the ratio argument indicates the ratio of + training data set; if it is a list of float numbers, the splitter splits + data into several portions corresponding to the split ratios. If a list is + provided and the ratios are not summed to 1, they will be normalized. + min_rating (int): minimum number of ratings for user or item. + filter_by (str): either "user" or "item", depending on which of the two is to + filter with min_rating. + col_user (str): column name of user IDs. + col_item (str): column name of item IDs. + col_timestamp (str): column name of timestamps. + + Returns: + list: Splits of the input data as pandas.DataFrame. + """ + return _do_stratification( + data, + ratio=ratio, + min_rating=min_rating, + filter_by=filter_by, + col_user=col_user, + col_item=col_item, + col_timestamp=col_timestamp, + is_random=False, + )
+ + +
[docs]def python_stratified_split( + data, + ratio=0.75, + min_rating=1, + filter_by="user", + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + seed=42, +): + """Pandas stratified splitter. + + For each user / item, the split function takes proportions of ratings which is + specified by the split ratio(s). The split is stratified. + + Args: + data (pandas.DataFrame): Pandas DataFrame to be split. + ratio (float or list): Ratio for splitting data. If it is a single float number + it splits data into two halves and the ratio argument indicates the ratio of + training data set; if it is a list of float numbers, the splitter splits + data into several portions corresponding to the split ratios. If a list is + provided and the ratios are not summed to 1, they will be normalized. + seed (int): Seed. + min_rating (int): minimum number of ratings for user or item. + filter_by (str): either "user" or "item", depending on which of the two is to + filter with min_rating. + col_user (str): column name of user IDs. + col_item (str): column name of item IDs. + + Returns: + list: Splits of the input data as pandas.DataFrame. + """ + return _do_stratification( + data, + ratio=ratio, + min_rating=min_rating, + filter_by=filter_by, + col_user=col_user, + col_item=col_item, + is_random=True, + seed=seed, + )
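A worked example of the per-user proportioning (the grouped sizes assume Python's round-half-to-even behavior in `round()`):

```python
ratings = pd.DataFrame({
    "userID": [1, 1, 1, 1, 2, 2],
    "itemID": [1, 2, 3, 4, 1, 2],
    "rating": [5, 4, 3, 2, 5, 4],
})
train, test = python_stratified_split(ratings, ratio=0.75, seed=0)
# User 1 keeps round(0.75 * 4) = 3 rows in train, user 2 round(0.75 * 2) = 2.
print(train.groupby("userID").size().tolist())  # [3, 2]
```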
+ + +
[docs]def numpy_stratified_split(X, ratio=0.75, seed=42): + """Split the user/item affinity matrix (sparse matrix) into train and test set matrices while maintaining + local (i.e. per user) ratios. + + Main points: + + 1. In a typical recommender problem, different users rate a different number of items, + and therefore the user/affinity matrix has a sparse structure with variable number + of zeroes (unrated items) per row (user). Cutting a total amount of ratings will + result in a non-homogeneous distribution between train and test set, i.e. some test + users may have many ratings while others very few, if any. + + 2. In an unsupervised learning problem, no explicit answer is given. For this reason + the split needs to be implemented in a different way than in supervised learning. + In the latter, one typically splits the dataset by rows (by examples), ending up with + the same number of features but a different number of examples in the train/test set. + This scheme does not work in the unsupervised case, as part of the rated items needs to + be used as a test set for a fixed number of users. + + Solution: + + 1. Instead of cutting a total percentage, for each user we cut a relative ratio of the rated + items. For example, if user1 has rated 4 items and user2 10, cutting 25% will correspond to + 1 and 2.5 ratings in the test set, approximated as 1 and 2 by the round() function. + In this way, the 0.75 ratio is satisfied both locally and globally, preserving the original + distribution of ratings across the train and test set. + + 2. It is easy (and fast) to satisfy these requirements by creating the test set via element + subtraction from the original dataset X. We first create two copies of X; for each user we + select a random sample of local size ratio (point 1) and erase the remaining ratings, + obtaining in this way the test set matrix Xtst. The train set matrix is obtained in the + opposite way. + + Args: + X (numpy.ndarray, int): a sparse matrix to be split + ratio (float): fraction of the entire dataset to constitute the train set + seed (int): random seed + + Returns: + numpy.ndarray, numpy.ndarray: + - Xtr: The train set user/item affinity matrix. + - Xtst: The test set user/item affinity matrix. + """ + + np.random.seed(seed) # set the random seed + test_cut = int((1 - ratio) * 100) # percentage of ratings to go in the test set + + # initialize train and test set matrices + Xtr = X.copy() + Xtst = X.copy() + + # find the number of rated movies per user + rated = np.sum(Xtr != 0, axis=1) + + # for each user, cut down a test_size% for the test set + tst = np.around((rated * test_cut) / 100).astype(int) + + for u in range(X.shape[0]): + # For each user obtain the index of rated movies + idx = np.asarray(np.where(Xtr[u] != 0))[0].tolist() + + # extract a random subset of size n from the set of rated movies without repetition + idx_tst = np.random.choice(idx, tst[u], replace=False) + idx_train = list(set(idx).difference(set(idx_tst))) + + # change the selected rated movies to unrated in the train set + Xtr[u, idx_tst] = 0 + # set the movies that appear already in the train set as 0 + Xtst[u, idx_train] = 0 + + del idx, idx_train, idx_tst + + return Xtr, Xtst
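Example on a tiny affinity matrix; the train and test matrices have disjoint support and sum back to the original:

```python
X = np.array([
    [5, 4, 0, 3],
    [0, 2, 5, 1],
])
Xtr, Xtst = numpy_stratified_split(X, ratio=0.75, seed=42)
# Each user moves ~25% of their rated items to the test matrix.
print((Xtr + Xtst == X).all())  # True
```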
+ + \ No newline at end of file diff --git a/_modules/recommenders/datasets/spark_splitters.html b/_modules/recommenders/datasets/spark_splitters.html new file mode 100644 index 0000000000..e9053b610e --- /dev/null +++ b/_modules/recommenders/datasets/spark_splitters.html @@ -0,0 +1,666 @@ + + + + + + + + + + + recommenders.datasets.spark_splitters — Recommenders documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Source code for recommenders.datasets.spark_splitters

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import numpy as np
+
+try:
+    from pyspark.sql import functions as F, Window
+    from pyspark.storagelevel import StorageLevel
+except ImportError:
+    pass  # skip this import if we are in pure python environment
+
+from recommenders.utils.constants import (
+    DEFAULT_ITEM_COL,
+    DEFAULT_USER_COL,
+    DEFAULT_TIMESTAMP_COL,
+)
+from recommenders.datasets.split_utils import (
+    process_split_ratio,
+    min_rating_filter_spark,
+)
+
+
+
[docs]def spark_random_split(data, ratio=0.75, seed=42): + """Spark random splitter. + + Randomly split the data into several splits. + + Args: + data (pyspark.sql.DataFrame): Spark DataFrame to be split. + ratio (float or list): Ratio for splitting data. If it is a single float number + it splits data into two halves and the ratio argument indicates the ratio of + training data set; if it is a list of float numbers, the splitter splits + data into several portions corresponding to the split ratios. If a list + is provided and the ratios are not summed to 1, they will be normalized. + seed (int): Seed. + + Returns: + list: Splits of the input data as pyspark.sql.DataFrame. + """ + multi_split, ratio = process_split_ratio(ratio) + + if multi_split: + return data.randomSplit(ratio, seed=seed) + else: + return data.randomSplit([ratio, 1 - ratio], seed=seed)
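Usage sketch, assuming an active `SparkSession` named `spark` and a ratings DataFrame `ratings_df` (both hypothetical here):

```python
train, test = spark_random_split(ratings_df, ratio=0.75, seed=123)
train, val, test = spark_random_split(ratings_df, ratio=[0.6, 0.2, 0.2], seed=123)
```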
+ + +def _do_stratification_spark( + data, + ratio=0.75, + min_rating=1, + filter_by="user", + is_partitioned=True, + is_random=True, + seed=42, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_timestamp=DEFAULT_TIMESTAMP_COL, +): + """Helper function to perform stratified splits. + + This function splits data in a stratified manner. That is, the same values for the + filter_by column are retained in each split, but the corresponding set of entries + are divided according to the ratio provided. + + Args: + data (pyspark.sql.DataFrame): Spark DataFrame to be split. + ratio (float or list): Ratio for splitting data. If it is a single float number + it splits data into two sets and the ratio argument indicates the ratio of + training data set; if it is a list of float numbers, the splitter splits + data into several portions corresponding to the split ratios. If a list is + provided and the ratios are not summed to 1, they will be normalized. + min_rating (int): minimum number of ratings for user or item. + filter_by (str): either "user" or "item", depending on which of the two is to filter + with min_rating. + is_partitioned (bool): flag to partition data by filter_by column + is_random (bool): flag to make split randomly or use timestamp column + seed (int): Seed. + col_user (str): column name of user IDs. + col_item (str): column name of item IDs. + col_timestamp (str): column name of timestamps. + + Args: + + Returns: + """ + # A few preliminary checks. + if filter_by not in ["user", "item"]: + raise ValueError("filter_by should be either 'user' or 'item'.") + + if min_rating < 1: + raise ValueError("min_rating should be integer and larger than or equal to 1.") + + if col_user not in data.columns: + raise ValueError("Schema of data not valid. Missing User Col") + + if col_item not in data.columns: + raise ValueError("Schema of data not valid. Missing Item Col") + + if not is_random: + if col_timestamp not in data.columns: + raise ValueError("Schema of data not valid. Missing Timestamp Col") + + if min_rating > 1: + data = min_rating_filter_spark( + data=data, + min_rating=min_rating, + filter_by=filter_by, + col_user=col_user, + col_item=col_item, + ) + + split_by = col_user if filter_by == "user" else col_item + partition_by = split_by if is_partitioned else [] + + col_random = "_random" + if is_random: + data = data.withColumn(col_random, F.rand(seed=seed)) + order_by = F.col(col_random) + else: + order_by = F.col(col_timestamp) + + window_count = Window.partitionBy(partition_by) + window_spec = Window.partitionBy(partition_by).orderBy(order_by) + + data = ( + data.withColumn("_count", F.count(split_by).over(window_count)) + .withColumn("_rank", F.row_number().over(window_spec) / F.col("_count")) + .drop("_count", col_random) + ) + # Persist to avoid duplicate rows in splits caused by lazy evaluation + data.persist(StorageLevel.MEMORY_AND_DISK_2).count() + + multi_split, ratio = process_split_ratio(ratio) + ratio = ratio if multi_split else [ratio, 1 - ratio] + + splits = [] + prev_split = None + for split in np.cumsum(ratio): + condition = F.col("_rank") <= split + if prev_split is not None: + condition &= F.col("_rank") > prev_split + splits.append(data.filter(condition).drop("_rank")) + prev_split = split + + return splits + + +
[docs]def spark_chrono_split( + data, + ratio=0.75, + min_rating=1, + filter_by="user", + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_timestamp=DEFAULT_TIMESTAMP_COL, + no_partition=False, +): + """Spark chronological splitter. + + This function splits data in a chronological manner. That is, for each user / item, the + split function takes proportions of ratings which is specified by the split ratio(s). + The split is stratified. + + Args: + data (pyspark.sql.DataFrame): Spark DataFrame to be split. + ratio (float or list): Ratio for splitting data. If it is a single float number + it splits data into two sets and the ratio argument indicates the ratio of + training data set; if it is a list of float numbers, the splitter splits + data into several portions corresponding to the split ratios. If a list is + provided and the ratios are not summed to 1, they will be normalized. + min_rating (int): minimum number of ratings for user or item. + filter_by (str): either "user" or "item", depending on which of the two is to filter + with min_rating. + col_user (str): column name of user IDs. + col_item (str): column name of item IDs. + col_timestamp (str): column name of timestamps. + no_partition (bool): set to enable more accurate and less efficient splitting. + + Returns: + list: Splits of the input data as pyspark.sql.DataFrame. + """ + + return _do_stratification_spark( + data=data, + ratio=ratio, + min_rating=min_rating, + filter_by=filter_by, + is_partitioned=not no_partition, + is_random=False, + col_user=col_user, + col_item=col_item, + col_timestamp=col_timestamp, + )
+ + +
[docs]def spark_stratified_split( + data, + ratio=0.75, + min_rating=1, + filter_by="user", + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + seed=42, +): + """Spark stratified splitter. + + For each user / item, the split function takes proportions of ratings which is + specified by the split ratio(s). The split is stratified. + + Args: + data (pyspark.sql.DataFrame): Spark DataFrame to be split. + ratio (float or list): Ratio for splitting data. If it is a single float number + it splits data into two halves and the ratio argument indicates the ratio of + training data set; if it is a list of float numbers, the splitter splits + data into several portions corresponding to the split ratios. If a list is + provided and the ratios are not summed to 1, they will be normalized. + seed (int): Seed. + min_rating (int): minimum number of ratings for user or item. + filter_by (str): either "user" or "item", depending on which of the two is to filter + with min_rating. + col_user (str): column name of user IDs. + col_item (str): column name of item IDs. + + Returns: + list: Splits of the input data as pyspark.sql.DataFrame. + """ + return _do_stratification_spark( + data=data, + ratio=ratio, + min_rating=min_rating, + filter_by=filter_by, + seed=seed, + col_user=col_user, + col_item=col_item, + )
+ + +
[docs]def spark_timestamp_split(
    data,
    ratio=0.75,
    col_user=DEFAULT_USER_COL,
    col_item=DEFAULT_ITEM_COL,
    col_timestamp=DEFAULT_TIMESTAMP_COL,
):
    """Spark timestamp-based splitter.

    The splitter splits the data into sets by timestamps, without stratification on
    either user or item. The ratios are applied to the timestamp column, which is
    divided accordingly into several partitions.

    Args:
        data (pyspark.sql.DataFrame): Spark DataFrame to be split.
        ratio (float or list): Ratio for splitting data. If it is a single float number
            it splits data into two sets and the ratio argument indicates the ratio of
            the training data set; if it is a list of float numbers, the splitter splits
            data into several portions corresponding to the split ratios. If a list is
            provided and the ratios do not sum to 1, they will be normalized.
            Earlier indexed splits will have earlier times
            (e.g. the latest time in split[0] <= the earliest time in split[1]).
        col_user (str): column name of user IDs.
        col_item (str): column name of item IDs.
        col_timestamp (str): column name of timestamps. Float number represented in
            seconds since the Unix epoch.

    Returns:
        list: Splits of the input data as pyspark.sql.DataFrame.
    """
    return _do_stratification_spark(
        data=data,
        ratio=ratio,
        is_random=False,
        is_partitioned=False,
        col_user=col_user,
        col_item=col_item,
        col_timestamp=col_timestamp,
    )
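A small sketch of the global, non-stratified behavior (again reusing the toy `data` from above); the boundary property follows from ordering all rows by timestamp:

# The earliest 80% of all events form train; every train event is no later
# than any test event.
train, test = spark_timestamp_split(data, ratio=0.8)
assert train.agg({"timestamp": "max"}).first()[0] <= test.agg({"timestamp": "min"}).first()[0]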
\ No newline at end of file
diff --git a/_modules/recommenders/datasets/sparse.html b/_modules/recommenders/datasets/sparse.html
new file mode 100644
index 0000000000..eb9028d734
--- /dev/null
+++ b/_modules/recommenders/datasets/sparse.html
@@ -0,0 +1,574 @@
+recommenders.datasets.sparse — Recommenders documentation
Source code for recommenders.datasets.sparse

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import pandas as pd
+import numpy as np
+import itertools
+
+from scipy.sparse import coo_matrix
+import logging
+
+# import default parameters
+from recommenders.utils.constants import (
+    DEFAULT_USER_COL,
+    DEFAULT_ITEM_COL,
+    DEFAULT_RATING_COL,
+    DEFAULT_PREDICTION_COL,
+)
+
+
+log = logging.getLogger(__name__)
+
+
+
[docs]class AffinityMatrix:
    """Generate the user/item affinity matrix from a pandas dataframe and vice versa"""

    def __init__(
        self,
        df,
        items_list=None,
        col_user=DEFAULT_USER_COL,
        col_item=DEFAULT_ITEM_COL,
        col_rating=DEFAULT_RATING_COL,
        col_pred=DEFAULT_PREDICTION_COL,
        save_path=None,
    ):
        """Initialize class parameters

        Args:
            df (pandas.DataFrame): a dataframe containing the data
            items_list (numpy.ndarray): a list of unique items to use (if provided)
            col_user (str): default name for user column
            col_item (str): default name for item column
            col_rating (str): default name for rating column
            col_pred (str): default name for prediction column
            save_path (str): default path to save item/user maps
        """
        self.df = df  # dataframe
        self.items_list = items_list  # list of unique items

        # pandas DF parameters
        self.col_item = col_item
        self.col_user = col_user
        self.col_rating = col_rating
        self.col_pred = col_pred

        # Options to save the model for future use
        self.save_path = save_path

    def _gen_index(self):
        """
        Generate the user/item index:
        map_users, map_items: dictionaries mapping the original user/item index to matrix indices
        map_back_users, map_back_items: dictionaries to map back the matrix elements to the original
        dataframe indices

        Basic mechanics:
            As a first step we retrieve the unique elements in the dataset. In this way we can take care
            of either completely missing rows (a user with no ratings) or completely missing columns
            (an item that has not been reviewed by anyone). The original indices in the dataframe are
            then mapped to an ordered, contiguous integer series to generate a compact matrix representation.
            Functions to map back to the original indices are also provided and can be saved in order to use
            a pretrained model.
        """
        # sort entries by user index
        self.df_ = self.df.sort_values(by=[self.col_user])

        # find unique user and item index
        unique_users = self.df_[self.col_user].unique()

        if self.items_list is not None:
            unique_items = self.items_list  # use this list if provided
        else:
            unique_items = self.df_[
                self.col_item
            ].unique()  # otherwise use unique items from DF

        self.Nusers = len(unique_users)
        self.Nitems = len(unique_items)

        # create dictionaries to map unique users/items to hashed values to generate the matrix
        self.map_users = {x: i for i, x in enumerate(unique_users)}
        self.map_items = {x: i for i, x in enumerate(unique_items)}

        # map-back dictionaries used to get back the original dataframe
        self.map_back_users = {i: x for i, x in enumerate(unique_users)}
        self.map_back_items = {i: x for i, x in enumerate(unique_items)}

        self.df_.loc[:, "hashedItems"] = self.df_[self.col_item].map(self.map_items)
        self.df_.loc[:, "hashedUsers"] = self.df_[self.col_user].map(self.map_users)

        # optionally save the inverse dictionaries to work with trained models
        if self.save_path is not None:

            np.save(self.save_path + "/user_dict", self.map_users)
            np.save(self.save_path + "/item_dict", self.map_items)

            np.save(self.save_path + "/user_back_dict", self.map_back_users)
            np.save(self.save_path + "/item_back_dict", self.map_back_items)
[docs]    def gen_affinity_matrix(self):
        """Generate the user/item affinity matrix.

        As a first step, two new columns are added to the input DF, containing the index maps
        generated by the gen_index() method. The new indices, together with the ratings, are
        then used to generate the user/item affinity matrix using scipy's sparse matrix method
        coo_matrix; for reference see:
        https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.coo_matrix.html.
        The input format is: `coo_matrix((data, (rows, columns)), shape=(rows, columns))`

        Returns:
            numpy.ndarray, dict, dict:
            - User/item affinity matrix of dimensions (Nusers, Nitems); unrated items are
              assigned a value of 0.
            - Dictionary mapping the original user IDs to matrix row indices.
            - Dictionary mapping the original item IDs to matrix column indices.
        """

        log.info("Generating the user/item affinity matrix...")

        self._gen_index()

        ratings = self.df_[self.col_rating]  # ratings
        itm_id = self.df_["hashedItems"]  # itm_id serving as columns
        usr_id = self.df_["hashedUsers"]  # usr_id serving as rows

        # generate a sparse matrix representation using scipy's coo_matrix and convert to array format
        self.AM = coo_matrix(
            (ratings, (usr_id, itm_id)), shape=(self.Nusers, self.Nitems)
        ).toarray()

        zero = (self.AM == 0).sum()  # number of unrated items
        total = self.AM.shape[0] * self.AM.shape[1]  # number of elements in the matrix
        sparseness = zero / total * 100  # percentage of zeros in the matrix

        log.info("Matrix generated, sparseness percentage: %d" % sparseness)

        return self.AM, self.map_users, self.map_items
+ +
[docs]    def map_back_sparse(self, X, kind):
        """Map back the user/item affinity matrix to a pandas DataFrame.

        Args:
            X (numpy.ndarray, int32): user/item affinity matrix
            kind (str): specify if the output values are ratings or predictions
        Returns:
            pandas.DataFrame: the generated pandas dataframe
        """
        m, n = X.shape

        # 1) Create a DF from a sparse matrix
        # obtain the non-zero items
        items = [np.asanyarray(np.where(X[i, :] != 0)).flatten() for i in range(m)]
        ratings = [X[i, items[i]] for i in range(m)]  # obtain the non-zero ratings

        # Create user ids following the DF format
        userids = []
        for i in range(0, m):
            userids.extend([i] * len(items[i]))

        # Flatten the lists to follow the DF input format
        items = list(itertools.chain.from_iterable(items))
        ratings = list(itertools.chain.from_iterable(ratings))

        if kind == "ratings":
            col_out = self.col_rating
        else:
            col_out = self.col_pred

        # create a df
        out_df = pd.DataFrame.from_dict(
            {self.col_user: userids, self.col_item: items, col_out: ratings}
        )

        # 2) map back user/item ids to their original values
        out_df[self.col_user] = out_df[self.col_user].map(self.map_back_users)
        out_df[self.col_item] = out_df[self.col_item].map(self.map_back_items)

        return out_df
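A minimal round-trip sketch for the class (assuming the library's default column names; the data values are illustrative):

import pandas as pd

ratings = pd.DataFrame(
    {"userID": [1, 1, 2], "itemID": [10, 11, 10], "rating": [4.0, 5.0, 3.0]}
)

am = AffinityMatrix(ratings)
X, user_map, item_map = am.gen_affinity_matrix()  # X is a dense (2, 2) array

# Map a dense matrix (e.g. model output) back to a long-format DataFrame.
df_back = am.map_back_sparse(X, kind="ratings")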
\ No newline at end of file
diff --git a/_modules/recommenders/datasets/split_utils.html b/_modules/recommenders/datasets/split_utils.html
new file mode 100644
index 0000000000..b822ce3844
--- /dev/null
+++ b/_modules/recommenders/datasets/split_utils.html
@@ -0,0 +1,588 @@
+recommenders.datasets.split_utils — Recommenders documentation
Source code for recommenders.datasets.split_utils

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import numpy as np
+import math
+import logging
+
+from recommenders.utils.constants import DEFAULT_ITEM_COL, DEFAULT_USER_COL
+
+logger = logging.getLogger(__name__)
+
+try:
+    from pyspark.sql import functions as F, Window
+except ImportError:
+    pass  # so the environment without spark doesn't break
+
+
+
[docs]def process_split_ratio(ratio):
    """Generate split ratio lists.

    Args:
        ratio (float or list): a float number that indicates the split ratio, or a list
            of float numbers that indicate split ratios (if it is a multi-split).

    Returns:
        tuple:
        - bool: A boolean variable `multi` that indicates whether the splitting is multi or single.
        - list: A list of normalized split ratios.
    """
    if isinstance(ratio, float):
        if ratio <= 0 or ratio >= 1:
            raise ValueError("Split ratio has to be between 0 and 1")

        multi = False
    elif isinstance(ratio, list):
        if any([x <= 0 for x in ratio]):
            raise ValueError(
                "All split ratios in the ratio list should be larger than 0."
            )

        # normalize split ratios if they do not sum to 1
        if math.fsum(ratio) != 1.0:
            ratio = [x / math.fsum(ratio) for x in ratio]

        multi = True
    else:
        raise TypeError("Split ratio should be either float or a list of floats.")

    return multi, ratio
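Two illustrative calls, a sketch of the expected outputs under the normalization rule described above:

multi, ratios = process_split_ratio([3, 1, 1])
# multi == True, ratios == [0.6, 0.2, 0.2]

multi, ratio = process_split_ratio(0.75)
# multi == False, ratio == 0.75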
+ + +
[docs]def min_rating_filter_pandas(
    data,
    min_rating=1,
    filter_by="user",
    col_user=DEFAULT_USER_COL,
    col_item=DEFAULT_ITEM_COL,
):
    """Filter rating DataFrame for each user with minimum rating.

    Filtering a rating data frame by a minimum number of ratings per user/item is useful
    for generating a new data frame with warm users/items. The warmth is defined by the
    min_rating argument. For example, a user is called warm if they have rated at least 4 items.

    Args:
        data (pandas.DataFrame): DataFrame of user-item tuples. Columns of user and item
            should be present in the DataFrame, while other columns like rating,
            timestamp, etc. are optional.
        min_rating (int): minimum number of ratings for user or item.
        filter_by (str): either "user" or "item", depending on which of the two is to be
            filtered with min_rating.
        col_user (str): column name of user ID.
        col_item (str): column name of item ID.

    Returns:
        pandas.DataFrame: DataFrame with at least columns of user and item that has been
        filtered by the given specifications.
    """
    split_by_column = _get_column_name(filter_by, col_user, col_item)

    if min_rating < 1:
        raise ValueError("min_rating should be integer and larger than or equal to 1.")

    return data.groupby(split_by_column).filter(lambda x: len(x) >= min_rating)
+ + +
[docs]def min_rating_filter_spark(
    data,
    min_rating=1,
    filter_by="user",
    col_user=DEFAULT_USER_COL,
    col_item=DEFAULT_ITEM_COL,
):
    """Filter rating DataFrame for each user with minimum rating.

    Filtering a rating data frame by a minimum number of ratings per user/item is useful
    for generating a new data frame with warm users/items. The warmth is defined by the
    min_rating argument. For example, a user is called warm if they have rated at least 4 items.

    Args:
        data (pyspark.sql.DataFrame): DataFrame of user-item tuples. Columns of user and item
            should be present in the DataFrame, while other columns like rating,
            timestamp, etc. are optional.
        min_rating (int): minimum number of ratings for user or item.
        filter_by (str): either "user" or "item", depending on which of the two is to be
            filtered with min_rating.
        col_user (str): column name of user ID.
        col_item (str): column name of item ID.

    Returns:
        pyspark.sql.DataFrame: DataFrame with at least columns of user and item that has been
        filtered by the given specifications.
    """

    split_by_column = _get_column_name(filter_by, col_user, col_item)

    if min_rating < 1:
        raise ValueError("min_rating should be integer and larger than or equal to 1.")

    if min_rating > 1:
        window = Window.partitionBy(split_by_column)
        data = (
            data.withColumn("_count", F.count(split_by_column).over(window))
            .where(F.col("_count") >= min_rating)
            .drop("_count")
        )

    return data
+ + +def _get_column_name(name, col_user, col_item): + if name == "user": + return col_user + elif name == "item": + return col_item + else: + raise ValueError("name should be either 'user' or 'item'.") + + +
[docs]def split_pandas_data_with_ratios(data, ratios, seed=42, shuffle=False): + """Helper function to split pandas DataFrame with given ratios + + Note: + Implementation referenced from `this source <https://stackoverflow.com/questions/38250710/how-to-split-data-into-3-sets-train-validation-and-test>`_. + + Args: + data (pandas.DataFrame): Pandas data frame to be split. + ratios (list of floats): list of ratios for split. The ratios have to sum to 1. + seed (int): random seed. + shuffle (bool): whether data will be shuffled when being split. + + Returns: + list: List of pd.DataFrame split by the given specifications. + """ + if math.fsum(ratios) != 1.0: + raise ValueError("The ratios have to sum to 1") + + split_index = np.cumsum(ratios).tolist()[:-1] + + if shuffle: + data = data.sample(frac=1, random_state=seed) + + splits = np.split(data, [round(x * len(data)) for x in split_index]) + + # Add split index (this makes splitting by group more efficient). + for i in range(len(ratios)): + splits[i]["split_index"] = i + + return splits
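A short usage sketch (toy data; the ratios must already sum to 1 for this function):

import pandas as pd

df = pd.DataFrame({"userID": list(range(10)), "itemID": list(range(10))})
train, valid, test = split_pandas_data_with_ratios(df, [0.6, 0.2, 0.2], shuffle=True)
# Each split carries a "split_index" column: 0 for train, 1 for valid, 2 for test.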
+ + +
[docs]def filter_k_core(data, core_num=0, col_user="userID", col_item="itemID"):
    """Filter rating dataframe for minimum number of users and items by
    repeatedly applying min_rating_filter until the condition is satisfied.

    Args:
        data (pandas.DataFrame): DataFrame of user-item tuples.
        core_num (int): minimum number of ratings required for each user and each item;
            0 disables the filter.
        col_user (str): column name of user ID.
        col_item (str): column name of item ID.

    Returns:
        pandas.DataFrame: DataFrame in which each user and each item has at least
        core_num ratings.
    """
    num_users, num_items = len(data[col_user].unique()), len(data[col_item].unique())
    logger.info("Original: %d users and %d items", num_users, num_items)
    df_inp = data.copy()

    if core_num > 0:
        while True:
            df_inp = min_rating_filter_pandas(
                df_inp,
                min_rating=core_num,
                filter_by="item",
                col_user=col_user,
                col_item=col_item,
            )
            df_inp = min_rating_filter_pandas(
                df_inp,
                min_rating=core_num,
                filter_by="user",
                col_user=col_user,
                col_item=col_item,
            )
            count_u = df_inp.groupby(col_user)[col_item].count()
            count_i = df_inp.groupby(col_item)[col_user].count()
            if (
                len(count_i[count_i < core_num]) == 0
                and len(count_u[count_u < core_num]) == 0
            ):
                break
    df_inp = df_inp.sort_values(by=[col_user])
    num_users = len(df_inp[col_user].unique())
    num_items = len(df_inp[col_item].unique())
    logger.info("Final: %d users and %d items", num_users, num_items)

    return df_inp
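A small sketch of the iterative k-core filter (toy data; user 3 and then item 12 fall below the 2-core and are pruned over two passes):

import pandas as pd

df = pd.DataFrame(
    {"userID": [1, 1, 1, 2, 2, 3], "itemID": [10, 11, 12, 10, 11, 12]}
)
# Keep only users and items with at least 2 interactions each.
df_core = filter_k_core(df, core_num=2)
# Remaining rows: users 1 and 2 on items 10 and 11.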
\ No newline at end of file
diff --git a/_modules/recommenders/datasets/wikidata.html b/_modules/recommenders/datasets/wikidata.html
new file mode 100644
index 0000000000..88fb9975ac
--- /dev/null
+++ b/_modules/recommenders/datasets/wikidata.html
@@ -0,0 +1,639 @@
+recommenders.datasets.wikidata — Recommenders documentation
Source code for recommenders.datasets.wikidata

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import pandas as pd
+import requests
+import logging
+from retrying import retry
+
+
+logger = logging.getLogger(__name__)
+
+
+API_URL_WIKIPEDIA = "https://en.wikipedia.org/w/api.php"
+API_URL_WIKIDATA = "https://query.wikidata.org/sparql"
+SESSION = None
+
+
+
[docs]def get_session(session=None): + """Get session object + + Args: + session (requests.Session): request session object + + Returns: + requests.Session: request session object + """ + + if session is None: + global SESSION + if SESSION is None: + SESSION = requests.Session() + session = SESSION + + return session
+ + +
[docs]@retry(wait_random_min=1000, wait_random_max=5000, stop_max_attempt_number=5)
def find_wikidata_id(name, limit=1, session=None):
    """Find the entity ID in wikidata from a title string.

    Args:
        name (str): A string with search terms (e.g. "Batman (1989) film")
        limit (int): Number of results to return
        session (requests.Session): requests session to reuse connections

    Returns:
        str: wikidata entityID corresponding to the title string;
        'entityNotFound' will be returned if no page is found
    """

    session = get_session(session=session)

    params = dict(
        action="query",
        list="search",
        srsearch=bytes(name, encoding="utf8"),
        srlimit=limit,
        srprop="",
        format="json",
    )

    try:
        response = session.get(API_URL_WIKIPEDIA, params=params)
        page_id = response.json()["query"]["search"][0]["pageid"]
    except Exception:
        # TODO: distinguish between connection error and entity not found
        logger.warning("ENTITY NOT FOUND")
        return "entityNotFound"

    params = dict(
        action="query",
        prop="pageprops",
        ppprop="wikibase_item",
        pageids=[page_id],
        format="json",
    )

    try:
        response = session.get(API_URL_WIKIPEDIA, params=params)
        entity_id = response.json()["query"]["pages"][str(page_id)]["pageprops"][
            "wikibase_item"
        ]
    except Exception:
        # TODO: distinguish between connection error and entity not found
        logger.warning("ENTITY NOT FOUND")
        return "entityNotFound"

    return entity_id
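A minimal usage sketch; this performs live HTTP requests against the Wikipedia API, so the result depends on the current search index:

entity_id = find_wikidata_id("Batman (1989) film")
print(entity_id)  # a Wikidata QID string such as "Q...", or "entityNotFound" on failure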
+ + + + + +
[docs]def read_linked_entities(data):
    """Obtain linked entities (IDs and names) from a dictionary of query results.

    Args:
        data (dict): dictionary with linked pages.

    Returns:
        list: List of tuples, each holding a linked entityID and the corresponding
        entity name.
    """

    return [
        (
            c.get("valUrl").get("value").replace("http://www.wikidata.org/entity/", ""),
            c.get("valLabel").get("value"),
        )
        for c in data.get("results", {}).get("bindings", [])
    ]
+ + +
[docs]@retry(wait_random_min=1000, wait_random_max=5000, stop_max_attempt_number=5)
def query_entity_description(entity_id, session=None):
    """Query entity wikidata description from entityID.

    Args:
        entity_id (str): A wikidata page ID.
        session (requests.Session): requests session to reuse connections.

    Returns:
        str: Wikidata short description of the entityID;
        'descriptionNotFound' will be returned if no description is found.
    """
    query = (
        """
    PREFIX wd: <http://www.wikidata.org/entity/>
    PREFIX schema: <http://schema.org/>

    SELECT ?o
    WHERE
    {
      wd:"""
        + entity_id
        + """ schema:description ?o.
      FILTER ( lang(?o) = "en" )
    }
    """
    )

    session = get_session(session=session)

    try:
        r = session.get(API_URL_WIKIDATA, params=dict(query=query, format="json"))
        description = r.json()["results"]["bindings"][0]["o"]["value"]
    except Exception as e:  # noqa: F841
        logger.warning("DESCRIPTION NOT FOUND")
        return "descriptionNotFound"

    return description
+ + +
[docs]def search_wikidata(names, extras=None, describe=True, verbose=False):
    """Create DataFrame of Wikidata search results.

    Args:
        names (list[str]): List of names to search for.
        extras (dict(str: list)): Optional extra items to assign to results for corresponding name.
        describe (bool): Optional flag to include description of entity.
        verbose (bool): Optional flag to print out intermediate data.

    Returns:
        pandas.DataFrame: Wikidata results for all names with found entities.

    """

    results = []
    for idx, name in enumerate(names):
        entity_id = find_wikidata_id(name)
        if verbose:
            print("name: {name}, entity_id: {id}".format(name=name, id=entity_id))

        if entity_id == "entityNotFound":
            continue

        json_links = query_entity_links(entity_id)
        related_links = read_linked_entities(json_links)
        description = query_entity_description(entity_id) if describe else ""

        for related_entity, related_name in related_links:
            result = dict(
                name=name,
                original_entity=entity_id,
                linked_entities=related_entity,
                name_linked_entities=related_name,
            )
            if describe:
                result["description"] = description
            if extras is not None:
                for field, lst in extras.items():
                    result[field] = lst[idx]
            results.append(result)

    return pd.DataFrame(results)
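An end-to-end sketch (network-dependent; the `extras` mapping and movie names are illustrative):

names = ["The Matrix", "Inception"]
df_links = search_wikidata(names, extras={"movie_id": [1, 2]}, describe=True)
# One row per linked entity, with columns: name, original_entity, linked_entities,
# name_linked_entities, description and movie_id.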
\ No newline at end of file
diff --git a/_modules/recommenders/evaluation/python_evaluation.html b/_modules/recommenders/evaluation/python_evaluation.html
new file mode 100644
index 0000000000..8e9651050e
--- /dev/null
+++ b/_modules/recommenders/evaluation/python_evaluation.html
@@ -0,0 +1,2047 @@
+recommenders.evaluation.python_evaluation — Recommenders documentation
Source code for recommenders.evaluation.python_evaluation

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import numpy as np
+import pandas as pd
+from functools import wraps
+from sklearn.metrics import (
+    mean_squared_error,
+    mean_absolute_error,
+    r2_score,
+    explained_variance_score,
+    roc_auc_score,
+    log_loss,
+)
+
+from recommenders.utils.constants import (
+    DEFAULT_USER_COL,
+    DEFAULT_ITEM_COL,
+    DEFAULT_RATING_COL,
+    DEFAULT_PREDICTION_COL,
+    DEFAULT_RELEVANCE_COL,
+    DEFAULT_SIMILARITY_COL,
+    DEFAULT_ITEM_FEATURES_COL,
+    DEFAULT_ITEM_SIM_MEASURE,
+    DEFAULT_K,
+    DEFAULT_THRESHOLD,
+)
+from recommenders.datasets.pandas_df_utils import (
+    has_columns,
+    has_same_base_dtype,
+    lru_cache_df,
+)
+
+
+
[docs]class ColumnMismatchError(Exception): + """Exception raised when there is a mismatch in columns. + + This exception is raised when an operation involving columns + encounters a mismatch or inconsistency. + + Attributes: + message (str): Explanation of the error. + """ + + pass
+ + +
[docs]class ColumnTypeMismatchError(Exception): + """Exception raised when there is a mismatch in column types. + + This exception is raised when an operation involving column types + encounters a mismatch or inconsistency. + + Attributes: + message (str): Explanation of the error. + """ + + pass
+ + +def _check_column_dtypes(func): + """Checks columns of DataFrame inputs + + This includes the checks on: + + * whether the input columns exist in the input DataFrames + * whether the data types of col_user as well as col_item are matched in the two input DataFrames. + + Args: + func (function): function that will be wrapped + + Returns: + function: Wrapper function for checking dtypes. + """ + + @wraps(func) + def check_column_dtypes_wrapper( + rating_true, + rating_pred, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_prediction=DEFAULT_PREDICTION_COL, + *args, + **kwargs, + ): + """Check columns of DataFrame inputs + + Args: + rating_true (pandas.DataFrame): True data + rating_pred (pandas.DataFrame): Predicted data + col_user (str): column name for user + col_item (str): column name for item + col_rating (str): column name for rating + col_prediction (str): column name for prediction + """ + # Some ranking metrics don't have the rating column, so we don't need to check. + expected_true_columns = {col_user, col_item} + if "col_rating" in kwargs: + expected_true_columns.add(kwargs["col_rating"]) + if not has_columns(rating_true, expected_true_columns): + raise ColumnMismatchError("Missing columns in true rating DataFrame") + + if not has_columns(rating_pred, {col_user, col_item, col_prediction}): + raise ColumnMismatchError("Missing columns in predicted rating DataFrame") + + if not has_same_base_dtype( + rating_true, rating_pred, columns=[col_user, col_item] + ): + raise ColumnTypeMismatchError( + "Columns in provided DataFrames are not the same datatype" + ) + + return func( + rating_true=rating_true, + rating_pred=rating_pred, + col_user=col_user, + col_item=col_item, + col_prediction=col_prediction, + *args, + **kwargs, + ) + + return check_column_dtypes_wrapper + + +
[docs]@_check_column_dtypes
@lru_cache_df(maxsize=1)
def merge_rating_true_pred(
    rating_true,
    rating_pred,
    col_user=DEFAULT_USER_COL,
    col_item=DEFAULT_ITEM_COL,
    col_rating=DEFAULT_RATING_COL,
    col_prediction=DEFAULT_PREDICTION_COL,
):
    """Join truth and prediction data frames on userID and itemID and return the true
    and predicted ratings with the correct index.

    Args:
        rating_true (pandas.DataFrame): True data
        rating_pred (pandas.DataFrame): Predicted data
        col_user (str): column name for user
        col_item (str): column name for item
        col_rating (str): column name for rating
        col_prediction (str): column name for prediction

    Returns:
        numpy.ndarray: Array with the true ratings
        numpy.ndarray: Array with the predicted ratings

    """

    # pd.merge will apply suffixes to columns which have the same name across both dataframes
    suffixes = ["_true", "_pred"]
    rating_true_pred = pd.merge(
        rating_true, rating_pred, on=[col_user, col_item], suffixes=suffixes
    )
    if col_rating in rating_pred.columns:
        col_rating = col_rating + suffixes[0]
    if col_prediction in rating_true.columns:
        col_prediction = col_prediction + suffixes[1]
    return rating_true_pred[col_rating], rating_true_pred[col_prediction]
+ + +
[docs]def rmse( + rating_true, + rating_pred, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_rating=DEFAULT_RATING_COL, + col_prediction=DEFAULT_PREDICTION_COL, +): + """Calculate Root Mean Squared Error + + Args: + rating_true (pandas.DataFrame): True data. There should be no duplicate (userID, itemID) pairs + rating_pred (pandas.DataFrame): Predicted data. There should be no duplicate (userID, itemID) pairs + col_user (str): column name for user + col_item (str): column name for item + col_rating (str): column name for rating + col_prediction (str): column name for prediction + + Returns: + float: Root mean squared error + """ + + y_true, y_pred = merge_rating_true_pred( + rating_true=rating_true, + rating_pred=rating_pred, + col_user=col_user, + col_item=col_item, + col_rating=col_rating, + col_prediction=col_prediction, + ) + return np.sqrt(mean_squared_error(y_true, y_pred))
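A worked toy example (assuming the library's default column names):

import pandas as pd

rating_true = pd.DataFrame(
    {"userID": [1, 1, 2], "itemID": [10, 11, 10], "rating": [5.0, 4.0, 3.0]}
)
rating_pred = pd.DataFrame(
    {"userID": [1, 1, 2], "itemID": [10, 11, 10], "prediction": [4.5, 4.0, 3.5]}
)

# Errors are (0.5, 0.0, 0.5), so RMSE = sqrt((0.25 + 0 + 0.25) / 3) ≈ 0.408.
print(rmse(rating_true, rating_pred))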
+ + +
[docs]def mae( + rating_true, + rating_pred, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_rating=DEFAULT_RATING_COL, + col_prediction=DEFAULT_PREDICTION_COL, +): + """Calculate Mean Absolute Error. + + Args: + rating_true (pandas.DataFrame): True data. There should be no duplicate (userID, itemID) pairs + rating_pred (pandas.DataFrame): Predicted data. There should be no duplicate (userID, itemID) pairs + col_user (str): column name for user + col_item (str): column name for item + col_rating (str): column name for rating + col_prediction (str): column name for prediction + + Returns: + float: Mean Absolute Error. + """ + + y_true, y_pred = merge_rating_true_pred( + rating_true=rating_true, + rating_pred=rating_pred, + col_user=col_user, + col_item=col_item, + col_rating=col_rating, + col_prediction=col_prediction, + ) + return mean_absolute_error(y_true, y_pred)
+ + +
[docs]def rsquared( + rating_true, + rating_pred, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_rating=DEFAULT_RATING_COL, + col_prediction=DEFAULT_PREDICTION_COL, +): + """Calculate R squared + + Args: + rating_true (pandas.DataFrame): True data. There should be no duplicate (userID, itemID) pairs + rating_pred (pandas.DataFrame): Predicted data. There should be no duplicate (userID, itemID) pairs + col_user (str): column name for user + col_item (str): column name for item + col_rating (str): column name for rating + col_prediction (str): column name for prediction + + Returns: + float: R squared (min=0, max=1). + """ + + y_true, y_pred = merge_rating_true_pred( + rating_true=rating_true, + rating_pred=rating_pred, + col_user=col_user, + col_item=col_item, + col_rating=col_rating, + col_prediction=col_prediction, + ) + return r2_score(y_true, y_pred)
+ + +
[docs]def exp_var( + rating_true, + rating_pred, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_rating=DEFAULT_RATING_COL, + col_prediction=DEFAULT_PREDICTION_COL, +): + """Calculate explained variance. + + Args: + rating_true (pandas.DataFrame): True data. There should be no duplicate (userID, itemID) pairs + rating_pred (pandas.DataFrame): Predicted data. There should be no duplicate (userID, itemID) pairs + col_user (str): column name for user + col_item (str): column name for item + col_rating (str): column name for rating + col_prediction (str): column name for prediction + + Returns: + float: Explained variance (min=0, max=1). + """ + + y_true, y_pred = merge_rating_true_pred( + rating_true=rating_true, + rating_pred=rating_pred, + col_user=col_user, + col_item=col_item, + col_rating=col_rating, + col_prediction=col_prediction, + ) + return explained_variance_score(y_true, y_pred)
+ + +
[docs]def auc(
    rating_true,
    rating_pred,
    col_user=DEFAULT_USER_COL,
    col_item=DEFAULT_ITEM_COL,
    col_rating=DEFAULT_RATING_COL,
    col_prediction=DEFAULT_PREDICTION_COL,
):
    """Calculate the Area-Under-Curve metric for implicit feedback typed
    recommender, where rating is binary and prediction is float number ranging
    from 0 to 1.

    https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve

    Note:
        The evaluation does not require a leave-one-out scenario.
        This metric does not calculate group-based AUC which considers the AUC scores
        averaged across users. It is also not limited to k. Instead, it calculates the
        scores on the entire prediction results regardless of the users.

    Args:
        rating_true (pandas.DataFrame): True data
        rating_pred (pandas.DataFrame): Predicted data
        col_user (str): column name for user
        col_item (str): column name for item
        col_rating (str): column name for rating
        col_prediction (str): column name for prediction

    Returns:
        float: auc_score (min=0, max=1)
    """

    y_true, y_pred = merge_rating_true_pred(
        rating_true=rating_true,
        rating_pred=rating_pred,
        col_user=col_user,
        col_item=col_item,
        col_rating=col_rating,
        col_prediction=col_prediction,
    )
    return roc_auc_score(y_true, y_pred)
+ + +
[docs]def logloss( + rating_true, + rating_pred, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_rating=DEFAULT_RATING_COL, + col_prediction=DEFAULT_PREDICTION_COL, +): + """Calculate the logloss metric for implicit feedback typed + recommender, where rating is binary and prediction is float number ranging + from 0 to 1. + + https://en.wikipedia.org/wiki/Loss_functions_for_classification#Cross_entropy_loss_(Log_Loss) + + Args: + rating_true (pandas.DataFrame): True data + rating_pred (pandas.DataFrame): Predicted data + col_user (str): column name for user + col_item (str): column name for item + col_rating (str): column name for rating + col_prediction (str): column name for prediction + + Returns: + float: log_loss_score (min=-inf, max=inf) + """ + + y_true, y_pred = merge_rating_true_pred( + rating_true=rating_true, + rating_pred=rating_pred, + col_user=col_user, + col_item=col_item, + col_rating=col_rating, + col_prediction=col_prediction, + ) + return log_loss(y_true, y_pred)
+ + +
[docs]@_check_column_dtypes
@lru_cache_df(maxsize=1)
def merge_ranking_true_pred(
    rating_true,
    rating_pred,
    col_user,
    col_item,
    col_prediction,
    relevancy_method,
    k=DEFAULT_K,
    threshold=DEFAULT_THRESHOLD,
    **_,
):
    """Filter truth and prediction data frames on common users

    Args:
        rating_true (pandas.DataFrame): True DataFrame
        rating_pred (pandas.DataFrame): Predicted DataFrame
        col_user (str): column name for user
        col_item (str): column name for item
        col_prediction (str): column name for prediction
        relevancy_method (str): method for determining relevancy ['top_k', 'by_threshold', None]. None means that the
            top k items are directly provided, so there is no need to compute the relevancy operation.
        k (int): number of top k items per user (optional)
        threshold (float): threshold of top items per user (optional)

    Returns:
        pandas.DataFrame, pandas.DataFrame, int:
        - DataFrame of recommendation hits, sorted by `col_user` and `rank`.
        - DataFrame of hit counts vs actual relevant items per user.
        - Number of unique user ids.
    """

    # Make sure the prediction and true data frames have the same set of users
    common_users = set(rating_true[col_user]).intersection(set(rating_pred[col_user]))
    rating_true_common = rating_true[rating_true[col_user].isin(common_users)]
    rating_pred_common = rating_pred[rating_pred[col_user].isin(common_users)]
    n_users = len(common_users)

    # Return hit items in prediction data frame with ranking information. This is used for calculating NDCG and MAP.
    # Use first to generate unique ranking values for each item. This is to align with the implementation in
    # Spark evaluation metrics, where the index of each recommended item (the indices are unique to items) is used
    # to calculate penalized precision of the ordered items.
    if relevancy_method == "top_k":
        top_k = k
    elif relevancy_method == "by_threshold":
        top_k = threshold
    elif relevancy_method is None:
        top_k = None
    else:
        raise NotImplementedError("Invalid relevancy_method")
    df_hit = get_top_k_items(
        dataframe=rating_pred_common,
        col_user=col_user,
        col_rating=col_prediction,
        k=top_k,
    )
    df_hit = pd.merge(df_hit, rating_true_common, on=[col_user, col_item])[
        [col_user, col_item, "rank"]
    ]

    # count the number of hits vs actual relevant items per user
    df_hit_count = pd.merge(
        df_hit.groupby(col_user, as_index=False)[col_user].agg({"hit": "count"}),
        rating_true_common.groupby(col_user, as_index=False)[col_user].agg(
            {"actual": "count"}
        ),
        on=col_user,
    )

    return df_hit, df_hit_count, n_users
+ + +
[docs]def precision_at_k( + rating_true, + rating_pred, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_prediction=DEFAULT_PREDICTION_COL, + relevancy_method="top_k", + k=DEFAULT_K, + threshold=DEFAULT_THRESHOLD, + **_, +): + """Precision at K. + + Note: + We use the same formula to calculate precision@k as that in Spark. + More details can be found at + http://spark.apache.org/docs/2.1.1/api/python/pyspark.mllib.html#pyspark.mllib.evaluation.RankingMetrics.precisionAt + In particular, the maximum achievable precision may be < 1, if the number of items for a + user in rating_pred is less than k. + + Args: + rating_true (pandas.DataFrame): True DataFrame + rating_pred (pandas.DataFrame): Predicted DataFrame + col_user (str): column name for user + col_item (str): column name for item + col_prediction (str): column name for prediction + relevancy_method (str): method for determining relevancy ['top_k', 'by_threshold', None]. None means that the + top k items are directly provided, so there is no need to compute the relevancy operation. + k (int): number of top k items per user + threshold (float): threshold of top items per user (optional) + + Returns: + float: precision at k (min=0, max=1) + """ + df_hit, df_hit_count, n_users = merge_ranking_true_pred( + rating_true=rating_true, + rating_pred=rating_pred, + col_user=col_user, + col_item=col_item, + col_prediction=col_prediction, + relevancy_method=relevancy_method, + k=k, + threshold=threshold, + ) + + if df_hit.shape[0] == 0: + return 0.0 + + return (df_hit_count["hit"] / k).sum() / n_users
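A worked toy example of precision@k (default column names; with k=2 each user contributes hits/k):

import pandas as pd

rating_true = pd.DataFrame(
    {"userID": [1, 1, 2], "itemID": [10, 11, 12], "rating": [5, 4, 3]}
)
rating_pred = pd.DataFrame(
    {"userID": [1, 1, 2, 2], "itemID": [10, 12, 12, 13], "prediction": [0.9, 0.8, 0.7, 0.6]}
)

# User 1 hits 1 of its top 2, user 2 hits 1 of its top 2: (1/2 + 1/2) / 2 = 0.5.
print(precision_at_k(rating_true, rating_pred, k=2))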
+ + +
[docs]def recall_at_k( + rating_true, + rating_pred, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_prediction=DEFAULT_PREDICTION_COL, + relevancy_method="top_k", + k=DEFAULT_K, + threshold=DEFAULT_THRESHOLD, + **_, +): + """Recall at K. + + Args: + rating_true (pandas.DataFrame): True DataFrame + rating_pred (pandas.DataFrame): Predicted DataFrame + col_user (str): column name for user + col_item (str): column name for item + col_prediction (str): column name for prediction + relevancy_method (str): method for determining relevancy ['top_k', 'by_threshold', None]. None means that the + top k items are directly provided, so there is no need to compute the relevancy operation. + k (int): number of top k items per user + threshold (float): threshold of top items per user (optional) + + Returns: + float: recall at k (min=0, max=1). The maximum value is 1 even when fewer than + k items exist for a user in rating_true. + """ + df_hit, df_hit_count, n_users = merge_ranking_true_pred( + rating_true=rating_true, + rating_pred=rating_pred, + col_user=col_user, + col_item=col_item, + col_prediction=col_prediction, + relevancy_method=relevancy_method, + k=k, + threshold=threshold, + ) + + if df_hit.shape[0] == 0: + return 0.0 + + return (df_hit_count["hit"] / df_hit_count["actual"]).sum() / n_users
+ + +
[docs]def ndcg_at_k( + rating_true, + rating_pred, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_rating=DEFAULT_RATING_COL, + col_prediction=DEFAULT_PREDICTION_COL, + relevancy_method="top_k", + k=DEFAULT_K, + threshold=DEFAULT_THRESHOLD, + score_type="binary", + discfun_type="loge", + **_, +): + """Normalized Discounted Cumulative Gain (nDCG). + + Info: https://en.wikipedia.org/wiki/Discounted_cumulative_gain + + Args: + rating_true (pandas.DataFrame): True DataFrame + rating_pred (pandas.DataFrame): Predicted DataFrame + col_user (str): column name for user + col_item (str): column name for item + col_rating (str): column name for rating + col_prediction (str): column name for prediction + relevancy_method (str): method for determining relevancy ['top_k', 'by_threshold', None]. None means that the + top k items are directly provided, so there is no need to compute the relevancy operation. + k (int): number of top k items per user + threshold (float): threshold of top items per user (optional) + score_type (str): type of relevance scores ['binary', 'raw', 'exp']. With the default option 'binary', the + relevance score is reduced to either 1 (hit) or 0 (miss). Option 'raw' uses the raw relevance score. + Option 'exp' uses (2 ** RAW_RELEVANCE - 1) as the relevance score + discfun_type (str): type of discount function ['loge', 'log2'] used to calculate DCG. + + Returns: + float: nDCG at k (min=0, max=1). + """ + df_hit, _, _ = merge_ranking_true_pred( + rating_true=rating_true, + rating_pred=rating_pred, + col_user=col_user, + col_item=col_item, + col_prediction=col_prediction, + relevancy_method=relevancy_method, + k=k, + threshold=threshold, + ) + + if df_hit.shape[0] == 0: + return 0.0 + + df_dcg = df_hit.merge(rating_pred, on=[col_user, col_item]).merge( + rating_true, on=[col_user, col_item], how="outer", suffixes=("_left", None) + ) + + if score_type == "binary": + df_dcg["rel"] = 1 + elif score_type == "raw": + df_dcg["rel"] = df_dcg[col_rating] + elif score_type == "exp": + df_dcg["rel"] = 2 ** df_dcg[col_rating] - 1 + else: + raise ValueError("score_type must be one of 'binary', 'raw', 'exp'") + + if discfun_type == "loge": + discfun = np.log + elif discfun_type == "log2": + discfun = np.log2 + else: + raise ValueError("discfun_type must be one of 'loge', 'log2'") + + # Calculate the actual discounted gain for each record + df_dcg["dcg"] = df_dcg["rel"] / discfun(1 + df_dcg["rank"]) + + # Calculate the ideal discounted gain for each record + df_idcg = df_dcg.sort_values([col_user, col_rating], ascending=False) + df_idcg["irank"] = df_idcg.groupby(col_user, as_index=False, sort=False)[ + col_rating + ].rank("first", ascending=False) + df_idcg["idcg"] = df_idcg["rel"] / discfun(1 + df_idcg["irank"]) + + # Calculate the actual DCG for each user + df_user = df_dcg.groupby(col_user, as_index=False, sort=False).agg({"dcg": "sum"}) + + # Calculate the ideal DCG for each user + df_user = df_user.merge( + df_idcg.groupby(col_user, as_index=False, sort=False) + .head(k) + .groupby(col_user, as_index=False, sort=False) + .agg({"idcg": "sum"}), + on=col_user, + ) + + # DCG over IDCG is the normalized DCG + df_user["ndcg"] = df_user["dcg"] / df_user["idcg"] + return df_user["ndcg"].mean()
+ + +@lru_cache_df(maxsize=1) +def _get_reciprocal_rank( + rating_true, + rating_pred, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_prediction=DEFAULT_PREDICTION_COL, + relevancy_method="top_k", + k=DEFAULT_K, + threshold=DEFAULT_THRESHOLD, +): + df_hit, df_hit_count, n_users = merge_ranking_true_pred( + rating_true=rating_true, + rating_pred=rating_pred, + col_user=col_user, + col_item=col_item, + col_prediction=col_prediction, + relevancy_method=relevancy_method, + k=k, + threshold=threshold, + ) + + if df_hit.shape[0] == 0: + return None, n_users + + # calculate reciprocal rank of items for each user and sum them up + df_hit_sorted = df_hit.copy() + df_hit_sorted["rr"] = ( + df_hit_sorted.groupby(col_user).cumcount() + 1 + ) / df_hit_sorted["rank"] + df_hit_sorted = df_hit_sorted.groupby(col_user).agg({"rr": "sum"}).reset_index() + + return pd.merge(df_hit_sorted, df_hit_count, on=col_user), n_users + + +
[docs]def map( + rating_true, + rating_pred, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_prediction=DEFAULT_PREDICTION_COL, + relevancy_method="top_k", + k=DEFAULT_K, + threshold=DEFAULT_THRESHOLD, + **_, +): + """Mean Average Precision for top k prediction items + + The implementation of MAP is referenced from Spark MLlib evaluation metrics. + https://spark.apache.org/docs/2.3.0/mllib-evaluation-metrics.html#ranking-systems + + A good reference can be found at: + http://web.stanford.edu/class/cs276/handouts/EvaluationNew-handout-6-per.pdf + + Note: + The MAP is meant to calculate Avg. Precision for the relevant items, so it is normalized by the number of + relevant items in the ground truth data, instead of k. + + Args: + rating_true (pandas.DataFrame): True DataFrame + rating_pred (pandas.DataFrame): Predicted DataFrame + col_user (str): column name for user + col_item (str): column name for item + col_prediction (str): column name for prediction + relevancy_method (str): method for determining relevancy ['top_k', 'by_threshold', None]. None means that the + top k items are directly provided, so there is no need to compute the relevancy operation. + k (int): number of top k items per user + threshold (float): threshold of top items per user (optional) + + Returns: + float: MAP (min=0, max=1) + """ + df_merge, n_users = _get_reciprocal_rank( + rating_true=rating_true, + rating_pred=rating_pred, + col_user=col_user, + col_item=col_item, + col_prediction=col_prediction, + relevancy_method=relevancy_method, + k=k, + threshold=threshold, + ) + + if df_merge is None: + return 0.0 + else: + return (df_merge["rr"] / df_merge["actual"]).sum() / n_users
+ + +
[docs]def map_at_k( + rating_true, + rating_pred, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_prediction=DEFAULT_PREDICTION_COL, + relevancy_method="top_k", + k=DEFAULT_K, + threshold=DEFAULT_THRESHOLD, + **_, +): + """Mean Average Precision at k + + The implementation of MAP@k is referenced from Spark MLlib evaluation metrics. + https://github.com/apache/spark/blob/b938ff9f520fd4e4997938284ffa0aba9ea271fc/mllib/src/main/scala/org/apache/spark/mllib/evaluation/RankingMetrics.scala#L99 + + Args: + rating_true (pandas.DataFrame): True DataFrame + rating_pred (pandas.DataFrame): Predicted DataFrame + col_user (str): column name for user + col_item (str): column name for item + col_prediction (str): column name for prediction + relevancy_method (str): method for determining relevancy ['top_k', 'by_threshold', None]. None means that the + top k items are directly provided, so there is no need to compute the relevancy operation. + k (int): number of top k items per user + threshold (float): threshold of top items per user (optional) + + Returns: + float: MAP@k (min=0, max=1) + """ + df_merge, n_users = _get_reciprocal_rank( + rating_true=rating_true, + rating_pred=rating_pred, + col_user=col_user, + col_item=col_item, + col_prediction=col_prediction, + relevancy_method=relevancy_method, + k=k, + threshold=threshold, + ) + + if df_merge is None: + return 0.0 + else: + return ( + df_merge["rr"] / df_merge["actual"].apply(lambda x: min(x, k)) + ).sum() / n_users
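The two metrics differ only in the normalization term: `map` divides each user's summed precision contributions by the number of relevant items, while `map_at_k` divides by min(#relevant, k). A sketch contrasting them on the toy frames from the precision@k example above (they diverge only when a user has more relevant items than k):

from recommenders.evaluation import python_evaluation as pe

print(pe.map(rating_true, rating_pred, k=2))
print(pe.map_at_k(rating_true, rating_pred, k=2))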
+ + +
[docs]def get_top_k_items(
    dataframe, col_user=DEFAULT_USER_COL, col_rating=DEFAULT_RATING_COL, k=DEFAULT_K
):
    """Get the input customer-item-rating tuple in the format of Pandas
    DataFrame, output a Pandas DataFrame in the dense format of top k items
    for each user.

    Note:
        If the ratings are implicit, append a column of constants to serve as
        ratings.

    Args:
        dataframe (pandas.DataFrame): DataFrame of rating data (in the format
            customerID-itemID-rating)
        col_user (str): column name for user
        col_rating (str): column name for rating
        k (int or None): number of items for each user; None means that the input has
            already been filtered to the top k items per user and sorted by ratings,
            so there is no need to do that again.

    Returns:
        pandas.DataFrame: DataFrame of top k items for each user, sorted by `col_user` and `rank`
    """
    # Sort dataframe by col_user and (top k) col_rating
    if k is None:
        top_k_items = dataframe
    else:
        top_k_items = (
            dataframe.sort_values([col_user, col_rating], ascending=[True, False])
            .groupby(col_user, as_index=False)
            .head(k)
            .reset_index(drop=True)
        )
    # Add ranks
    top_k_items["rank"] = top_k_items.groupby(col_user, sort=False).cumcount() + 1
    return top_k_items
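A short sketch of the helper (toy data):

import pandas as pd

df = pd.DataFrame(
    {"userID": [1, 1, 1, 2, 2], "itemID": [10, 11, 12, 10, 13],
     "rating": [3.0, 5.0, 4.0, 2.0, 1.0]}
)
top2 = get_top_k_items(df, k=2)
# Per user, the two highest-rated rows, with a "rank" column valued 1..2.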
+ + +"""Function name and function mapper. +Useful when we have to serialize evaluation metric names +and call the functions based on deserialized names""" +metrics = { + rmse.__name__: rmse, + mae.__name__: mae, + rsquared.__name__: rsquared, + exp_var.__name__: exp_var, + precision_at_k.__name__: precision_at_k, + recall_at_k.__name__: recall_at_k, + ndcg_at_k.__name__: ndcg_at_k, + map_at_k.__name__: map_at_k, + map.__name__: map, +} + + +# diversity metrics +def _check_column_dtypes_diversity_serendipity(func): + """Checks columns of DataFrame inputs + + This includes the checks on: + + * whether the input columns exist in the input DataFrames + * whether the data types of col_user as well as col_item are matched in the two input DataFrames. + * whether reco_df contains any user_item pairs that are already shown in train_df + * check relevance column in reco_df + * check column names in item_feature_df + + Args: + func (function): function that will be wrapped + + Returns: + function: Wrapper function for checking dtypes. + """ + + @wraps(func) + def check_column_dtypes_diversity_serendipity_wrapper( + train_df, + reco_df, + item_feature_df=None, + item_sim_measure=DEFAULT_ITEM_SIM_MEASURE, + col_item_features=DEFAULT_ITEM_FEATURES_COL, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_sim=DEFAULT_SIMILARITY_COL, + col_relevance=None, + *args, + **kwargs, + ): + """Check columns of DataFrame inputs + + Args: + train_df (pandas.DataFrame): Data set with historical data for users and items they + have interacted with; contains col_user, col_item. Assumed to not contain any duplicate rows. + reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item, + col_relevance (optional). Assumed to not contain any duplicate user-item pairs. + item_feature_df (pandas.DataFrame): (Optional) It is required only when item_sim_measure='item_feature_vector'. + It contains two columns: col_item and features (a feature vector). + item_sim_measure (str): (Optional) This column indicates which item similarity measure to be used. + Available measures include item_cooccurrence_count (default choice) and item_feature_vector. + col_item_features (str): item feature column name. + col_user (str): User id column name. + col_item (str): Item id column name. + col_sim (str): This column indicates the column name for item similarity. + col_relevance (str): This column indicates whether the recommended item is actually + relevant to the user or not. + """ + + if not has_columns(train_df, [col_user, col_item]): + raise ValueError("Missing columns in train_df DataFrame") + if not has_columns(reco_df, [col_user, col_item]): + raise ValueError("Missing columns in reco_df DataFrame") + if not has_same_base_dtype(train_df, reco_df, columns=[col_user, col_item]): + raise ValueError("Columns in provided DataFrames are not the same datatype") + if col_relevance is None: + col_relevance = DEFAULT_RELEVANCE_COL + # relevance term, default is 1 (relevant) for all + reco_df = reco_df[[col_user, col_item]] + reco_df[col_relevance] = 1.0 + else: + col_relevance = col_relevance + reco_df = reco_df[[col_user, col_item, col_relevance]].astype( + {col_relevance: np.float16} + ) + if item_sim_measure == "item_feature_vector": + required_columns = [col_item, col_item_features] + if item_feature_df is not None: + if not has_columns(item_feature_df, required_columns): + raise ValueError("Missing columns in item_feature_df DataFrame") + else: + raise Exception( + "item_feature_df not specified! 
item_feature_df must be provided " + "if choosing to use item_feature_vector to calculate item similarity. " + "item_feature_df should have columns: " + str(required_columns) + ) + # check if reco_df contains any user_item pairs that are already shown in train_df + count_intersection = pd.merge( + train_df, reco_df, how="inner", on=[col_user, col_item] + ).shape[0] + if count_intersection != 0: + raise Exception( + "reco_df should not contain any user_item pairs that are already shown in train_df" + ) + + return func( + train_df=train_df, + reco_df=reco_df, + item_feature_df=item_feature_df, + item_sim_measure=item_sim_measure, + col_user=col_user, + col_item=col_item, + col_sim=col_sim, + col_relevance=col_relevance, + *args, + **kwargs, + ) + + return check_column_dtypes_diversity_serendipity_wrapper + + +def _check_column_dtypes_novelty_coverage(func): + """Checks columns of DataFrame inputs + + This includes the checks on: + + * whether the input columns exist in the input DataFrames + * whether the data types of col_user as well as col_item are matched in the two input DataFrames. + * whether reco_df contains any user_item pairs that are already shown in train_df + + Args: + func (function): function that will be wrapped + + Returns: + function: Wrapper function for checking dtypes. + """ + + @wraps(func) + def check_column_dtypes_novelty_coverage_wrapper( + train_df, + reco_df, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + *args, + **kwargs, + ): + """Check columns of DataFrame inputs + + Args: + train_df (pandas.DataFrame): Data set with historical data for users and items they + have interacted with; contains col_user, col_item. Assumed to not contain any duplicate rows. + Interaction here follows the *item choice model* from Castells et al. + reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item, + col_relevance (optional). Assumed to not contain any duplicate user-item pairs. + col_user (str): User id column name. + col_item (str): Item id column name. 
        """

        if not has_columns(train_df, [col_user, col_item]):
            raise ValueError("Missing columns in train_df DataFrame")
        if not has_columns(reco_df, [col_user, col_item]):
            raise ValueError("Missing columns in reco_df DataFrame")
        if not has_same_base_dtype(train_df, reco_df, columns=[col_user, col_item]):
            raise ValueError("Columns in provided DataFrames are not the same datatype")

        count_intersection = pd.merge(
            train_df, reco_df, how="inner", on=[col_user, col_item]
        ).shape[0]
        if count_intersection != 0:
            raise Exception(
                "reco_df should not contain any user_item pairs that are already shown in train_df"
            )

        return func(
            train_df=train_df,
            reco_df=reco_df,
            col_user=col_user,
            col_item=col_item,
            *args,
            **kwargs,
        )

    return check_column_dtypes_novelty_coverage_wrapper


@lru_cache_df(maxsize=1)
def _get_pairwise_items(
    df,
    col_user=DEFAULT_USER_COL,
    col_item=DEFAULT_ITEM_COL,
):
    """Get pairwise combinations of items per user (ignoring duplicate pairs [1,2] == [2,1])"""
    df_user_i1 = df[[col_user, col_item]]
    df_user_i1.columns = [col_user, "i1"]

    df_user_i2 = df[[col_user, col_item]]
    df_user_i2.columns = [col_user, "i2"]

    df_user_i1_i2 = pd.merge(df_user_i1, df_user_i2, how="inner", on=[col_user])

    df_pairwise_items = df_user_i1_i2[(df_user_i1_i2["i1"] <= df_user_i1_i2["i2"])][
        [col_user, "i1", "i2"]
    ].reset_index(drop=True)
    return df_pairwise_items


@lru_cache_df(maxsize=1)
def _get_cosine_similarity(
    train_df,
    item_feature_df=None,
    item_sim_measure=DEFAULT_ITEM_SIM_MEASURE,
    col_item_features=DEFAULT_ITEM_FEATURES_COL,
    col_user=DEFAULT_USER_COL,
    col_item=DEFAULT_ITEM_COL,
    col_sim=DEFAULT_SIMILARITY_COL,
):
    if item_sim_measure == "item_cooccurrence_count":
        # calculate item-item similarity based on item co-occurrence counts
        df_cosine_similarity = _get_cooccurrence_similarity(
            train_df, col_user, col_item, col_sim
        )
    elif item_sim_measure == "item_feature_vector":
        # calculate item-item similarity based on item feature vectors
        df_cosine_similarity = _get_item_feature_similarity(
            item_feature_df, col_item_features, col_user, col_item
        )
    else:
        raise Exception(
            "item_sim_measure not recognized! The available options include 'item_cooccurrence_count' and 'item_feature_vector'."
        )
    return df_cosine_similarity


@lru_cache_df(maxsize=1)
def _get_cooccurrence_similarity(
    train_df,
    col_user=DEFAULT_USER_COL,
    col_item=DEFAULT_ITEM_COL,
    col_sim=DEFAULT_SIMILARITY_COL,
):
    """Cosine similarity metric from

    :Citation:

        Y.C. Zhang, D.Ó. Séaghdha, D. Quercia and T. Jambor, Auralist:
        introducing serendipity into music recommendation, WSDM 2012

    The item indexes in the result are such that i1 <= i2.
+ """ + pairs = _get_pairwise_items(train_df, col_user, col_item) + pairs_count = pd.DataFrame( + {"count": pairs.groupby(["i1", "i2"]).size()} + ).reset_index() + item_count = pd.DataFrame( + {"count": train_df.groupby([col_item]).size()} + ).reset_index() + item_count["item_sqrt_count"] = item_count["count"] ** 0.5 + item_co_occur = pairs_count.merge( + item_count[[col_item, "item_sqrt_count"]], + left_on=["i1"], + right_on=[col_item], + ).drop(columns=[col_item]) + + item_co_occur.columns = ["i1", "i2", "count", "i1_sqrt_count"] + + item_co_occur = item_co_occur.merge( + item_count[[col_item, "item_sqrt_count"]], + left_on=["i2"], + right_on=[col_item], + ).drop(columns=[col_item]) + item_co_occur.columns = [ + "i1", + "i2", + "count", + "i1_sqrt_count", + "i2_sqrt_count", + ] + + item_co_occur[col_sim] = item_co_occur["count"] / ( + item_co_occur["i1_sqrt_count"] * item_co_occur["i2_sqrt_count"] + ) + df_cosine_similarity = ( + item_co_occur[["i1", "i2", col_sim]] + .sort_values(["i1", "i2"]) + .reset_index(drop=True) + ) + + return df_cosine_similarity + + +@lru_cache_df(maxsize=1) +def _get_item_feature_similarity( + item_feature_df, + col_item_features=DEFAULT_ITEM_FEATURES_COL, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_sim=DEFAULT_SIMILARITY_COL, +): + """Cosine similarity metric based on item feature vectors + + The item indexes in the result are such that i1 <= i2. + """ + df1 = item_feature_df[[col_item, col_item_features]] + df1.columns = ["i1", "f1"] + df1["key"] = 0 + df2 = item_feature_df[[col_item, col_item_features]] + df2.columns = ["i2", "f2"] + df2["key"] = 0 + + df = pd.merge(df1, df2, on="key", how="outer").drop("key", axis=1) + df_item_feature_pair = df[(df["i1"] <= df["i2"])].reset_index(drop=True) + + df_item_feature_pair[col_sim] = df_item_feature_pair.apply( + lambda x: float(x.f1.dot(x.f2)) + / float(np.linalg.norm(x.f1, 2) * np.linalg.norm(x.f2, 2)), + axis=1, + ) + + df_cosine_similarity = df_item_feature_pair[["i1", "i2", col_sim]].sort_values( + ["i1", "i2"] + ) + + return df_cosine_similarity + + +# Diversity metrics +@lru_cache_df(maxsize=1) +def _get_intralist_similarity( + train_df, + reco_df, + item_feature_df=None, + item_sim_measure=DEFAULT_ITEM_SIM_MEASURE, + col_item_features=DEFAULT_ITEM_FEATURES_COL, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_sim=DEFAULT_SIMILARITY_COL, +): + """Intra-list similarity from + + :Citation: + + "Improving Recommendation Lists Through Topic Diversification", + Ziegler, McNee, Konstan and Lausen, 2005. + """ + pairs = _get_pairwise_items(reco_df, col_user, col_item) + similarity_df = _get_cosine_similarity( + train_df, + item_feature_df, + item_sim_measure, + col_item_features, + col_user, + col_item, + col_sim, + ) + # Fillna(0) is needed in the cases where similarity_df does not have an entry for a pair of items. + # e.g. i1 and i2 have never occurred together. + + item_pair_sim = pairs.merge(similarity_df, on=["i1", "i2"], how="left") + item_pair_sim[col_sim].fillna(0, inplace=True) + item_pair_sim = item_pair_sim.loc[ + item_pair_sim["i1"] != item_pair_sim["i2"] + ].reset_index(drop=True) + df_intralist_similarity = ( + item_pair_sim.groupby([col_user]).agg({col_sim: "mean"}).reset_index() + ) + df_intralist_similarity.columns = [col_user, "avg_il_sim"] + + return df_intralist_similarity + + +
[docs]@_check_column_dtypes_diversity_serendipity +@lru_cache_df(maxsize=1) +def user_diversity( + train_df, + reco_df, + item_feature_df=None, + item_sim_measure=DEFAULT_ITEM_SIM_MEASURE, + col_item_features=DEFAULT_ITEM_FEATURES_COL, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_sim=DEFAULT_SIMILARITY_COL, + col_relevance=None, +): + """Calculate average diversity of recommendations for each user. + The metric definition is based on formula (3) in the following reference: + + :Citation: + + Y.C. Zhang, D.Ó. Séaghdha, D. Quercia and T. Jambor, Auralist: + introducing serendipity into music recommendation, WSDM 2012 + + Args: + train_df (pandas.DataFrame): Data set with historical data for users and items they have interacted with; + contains col_user, col_item. Assumed to not contain any duplicate rows. + reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item, col_relevance (optional). + Assumed to not contain any duplicate user-item pairs. + item_feature_df (pandas.DataFrame): (Optional) It is required only when item_sim_measure='item_feature_vector'. + It contains two columns: col_item and features (a feature vector). + item_sim_measure (str): (Optional) This column indicates which item similarity measure to be used. + Available measures include item_cooccurrence_count (default choice) and item_feature_vector. + col_item_features (str): item feature column name. + col_user (str): User id column name. + col_item (str): Item id column name. + col_sim (str): This column indicates the column name for item similarity. + col_relevance (str): This column indicates whether the recommended item is actually relevant to the user or not. + + Returns: + pandas.DataFrame: A dataframe with the following columns: col_user, user_diversity. + """ + + df_intralist_similarity = _get_intralist_similarity( + train_df, + reco_df, + item_feature_df, + item_sim_measure, + col_item_features, + col_user, + col_item, + col_sim, + ) + df_user_diversity = df_intralist_similarity + df_user_diversity["user_diversity"] = 1 - df_user_diversity["avg_il_sim"] + df_user_diversity = ( + df_user_diversity[[col_user, "user_diversity"]] + .sort_values(col_user) + .reset_index(drop=True) + ) + + return df_user_diversity
+ + +
[docs]@_check_column_dtypes_diversity_serendipity +def diversity( + train_df, + reco_df, + item_feature_df=None, + item_sim_measure=DEFAULT_ITEM_SIM_MEASURE, + col_item_features=DEFAULT_ITEM_FEATURES_COL, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_sim=DEFAULT_SIMILARITY_COL, + col_relevance=None, +): + """Calculate average diversity of recommendations across all users. + + Args: + train_df (pandas.DataFrame): Data set with historical data for users and items they have interacted with; + contains col_user, col_item. Assumed to not contain any duplicate rows. + reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item, col_relevance (optional). + Assumed to not contain any duplicate user-item pairs. + item_feature_df (pandas.DataFrame): (Optional) It is required only when item_sim_measure='item_feature_vector'. + It contains two columns: col_item and features (a feature vector). + item_sim_measure (str): (Optional) This column indicates which item similarity measure to be used. + Available measures include item_cooccurrence_count (default choice) and item_feature_vector. + col_item_features (str): item feature column name. + col_user (str): User id column name. + col_item (str): Item id column name. + col_sim (str): This column indicates the column name for item similarity. + col_relevance (str): This column indicates whether the recommended item is actually relevant to the user or not. + + Returns: + float: diversity. + """ + df_user_diversity = user_diversity( + train_df, + reco_df, + item_feature_df, + item_sim_measure, + col_item_features, + col_user, + col_item, + col_sim, + ) + avg_diversity = df_user_diversity.agg({"user_diversity": "mean"})[0] + return avg_diversity
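+
+# --- Editor's example (added sketch, not part of the original module) ---
+# A minimal illustration of calling the diversity metrics above. The toy
+# frames and values are invented; column names are the package defaults.
+def _example_diversity_usage():
+    train = pd.DataFrame({"userID": [1, 1, 1, 2, 2], "itemID": [1, 2, 4, 1, 3]})
+    # reco must not repeat any user-item pair already present in train
+    reco = pd.DataFrame({"userID": [1, 1, 2, 2], "itemID": [3, 5, 2, 5]})
+    per_user = user_diversity(train, reco)  # DataFrame: userID, user_diversity
+    overall = diversity(train, reco)  # scalar average over users
+    return per_user, overall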
+ + +# Novelty metrics +
[docs]@_check_column_dtypes_novelty_coverage +@lru_cache_df(maxsize=1) +def historical_item_novelty( + train_df, + reco_df, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, +): + """Calculate novelty for each item. Novelty is computed as the minus logarithm of + (number of interactions with item / total number of interactions). The definition of the metric + is based on the following reference using the choice model (eqs. 1 and 6): + + :Citation: + + P. Castells, S. Vargas, and J. Wang, Novelty and diversity metrics for recommender systems: + choice, discovery and relevance, ECIR 2011 + + The novelty of an item can be defined relative to a set of observed events on the set of all items. + These can be events of user choice (item "is picked" by a random user) or user discovery + (item "is known" to a random user). The above definition of novelty reflects a factor of item popularity. + High novelty values correspond to long-tail items in the density function, that few users have interacted + with and low novelty values correspond to popular head items. + + Args: + train_df (pandas.DataFrame): Data set with historical data for users and items they + have interacted with; contains col_user, col_item. Assumed to not contain any duplicate rows. + Interaction here follows the *item choice model* from Castells et al. + reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item, + col_relevance (optional). Assumed to not contain any duplicate user-item pairs. + col_user (str): User id column name. + col_item (str): Item id column name. + + Returns: + pandas.DataFrame: A dataframe with the following columns: col_item, item_novelty. + """ + + n_records = train_df.shape[0] + item_count = pd.DataFrame( + {"count": train_df.groupby([col_item]).size()} + ).reset_index() + item_count["item_novelty"] = -np.log2(item_count["count"] / n_records) + df_item_novelty = ( + item_count[[col_item, "item_novelty"]] + .sort_values(col_item) + .reset_index(drop=True) + ) + + return df_item_novelty
+ + +
[docs]@_check_column_dtypes_novelty_coverage +def novelty(train_df, reco_df, col_user=DEFAULT_USER_COL, col_item=DEFAULT_ITEM_COL): + """Calculate the average novelty in a list of recommended items (this assumes that the recommendation list + is already computed). Follows section 5 from + + :Citation: + + P. Castells, S. Vargas, and J. Wang, Novelty and diversity metrics for recommender systems: + choice, discovery and relevance, ECIR 2011 + + Args: + train_df (pandas.DataFrame): Data set with historical data for users and items they + have interacted with; contains col_user, col_item. Assumed to not contain any duplicate rows. + Interaction here follows the *item choice model* from Castells et al. + reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item, + col_relevance (optional). Assumed to not contain any duplicate user-item pairs. + col_user (str): User id column name. + col_item (str): Item id column name. + + Returns: + float: novelty. + """ + + df_item_novelty = historical_item_novelty(train_df, reco_df, col_user, col_item) + n_recommendations = reco_df.shape[0] + reco_item_count = pd.DataFrame( + {"count": reco_df.groupby([col_item]).size()} + ).reset_index() + reco_item_novelty = reco_item_count.merge(df_item_novelty, on=col_item) + reco_item_novelty["product"] = ( + reco_item_novelty["count"] * reco_item_novelty["item_novelty"] + ) + avg_novelty = reco_item_novelty.agg({"product": "sum"})[0] / n_recommendations + + return avg_novelty
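+
+# --- Editor's note (added example, not part of the original module) ---
+# Illustrative arithmetic: with 4 training interactions where item A appears
+# 3 times and item B once,
+#     item_novelty(A) = -log2(3/4) ≈ 0.415  (popular head item, low novelty)
+#     item_novelty(B) = -log2(1/4) = 2.0    (long-tail item, high novelty)
+# novelty() then averages the item novelties weighted by how often each item
+# occurs in reco_df.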
+ + +# Serendipity metrics +
[docs]@_check_column_dtypes_diversity_serendipity +@lru_cache_df(maxsize=1) +def user_item_serendipity( + train_df, + reco_df, + item_feature_df=None, + item_sim_measure=DEFAULT_ITEM_SIM_MEASURE, + col_item_features=DEFAULT_ITEM_FEATURES_COL, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_sim=DEFAULT_SIMILARITY_COL, + col_relevance=None, +): + """Calculate serendipity of each item in the recommendations for each user. + The metric definition is based on the following references: + + :Citation: + + Y.C. Zhang, D.Ó. Séaghdha, D. Quercia and T. Jambor, Auralist: + introducing serendipity into music recommendation, WSDM 2012 + + Eugene Yan, Serendipity: Accuracy’s unpopular best friend in Recommender Systems, + eugeneyan.com, April 2020 + + Args: + train_df (pandas.DataFrame): Data set with historical data for users and items they + have interacted with; contains col_user, col_item. Assumed to not contain any duplicate rows. + reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item, + col_relevance (optional). Assumed to not contain any duplicate user-item pairs. + item_feature_df (pandas.DataFrame): (Optional) It is required only when item_sim_measure='item_feature_vector'. + It contains two columns: col_item and features (a feature vector). + item_sim_measure (str): (Optional) This column indicates which item similarity measure to be used. + Available measures include item_cooccurrence_count (default choice) and item_feature_vector. + col_item_features (str): item feature column name. + col_user (str): User id column name. + col_item (str): Item id column name. + col_sim (str): This column indicates the column name for item similarity. + col_relevance (str): This column indicates whether the recommended item is actually + relevant to the user or not. + Returns: + pandas.DataFrame: A dataframe with columns: col_user, col_item, user_item_serendipity. + """ + # for every col_user, col_item in reco_df, join all interacted items from train_df. + # These interacted items are repeated for each item in reco_df for a specific user. 
+ df_cosine_similarity = _get_cosine_similarity( + train_df, + item_feature_df, + item_sim_measure, + col_item_features, + col_user, + col_item, + col_sim, + ) + reco_user_item = reco_df[[col_user, col_item]] + reco_user_item["reco_item_tmp"] = reco_user_item[col_item] + + train_user_item = train_df[[col_user, col_item]] + train_user_item.columns = [col_user, "train_item_tmp"] + + reco_train_user_item = reco_user_item.merge(train_user_item, on=[col_user]) + reco_train_user_item["i1"] = reco_train_user_item[ + ["reco_item_tmp", "train_item_tmp"] + ].min(axis=1) + reco_train_user_item["i2"] = reco_train_user_item[ + ["reco_item_tmp", "train_item_tmp"] + ].max(axis=1) + + reco_train_user_item_sim = reco_train_user_item.merge( + df_cosine_similarity, on=["i1", "i2"], how="left" + ) + reco_train_user_item_sim[col_sim].fillna(0, inplace=True) + + reco_user_item_avg_sim = ( + reco_train_user_item_sim.groupby([col_user, col_item]) + .agg({col_sim: "mean"}) + .reset_index() + ) + reco_user_item_avg_sim.columns = [ + col_user, + col_item, + "avg_item2interactedHistory_sim", + ] + + df_user_item_serendipity = reco_user_item_avg_sim.merge( + reco_df, on=[col_user, col_item] + ) + df_user_item_serendipity["user_item_serendipity"] = ( + 1 - df_user_item_serendipity["avg_item2interactedHistory_sim"] + ) * df_user_item_serendipity[col_relevance] + df_user_item_serendipity = ( + df_user_item_serendipity[[col_user, col_item, "user_item_serendipity"]] + .sort_values([col_user, col_item]) + .reset_index(drop=True) + ) + + return df_user_item_serendipity
+ + +
[docs]@lru_cache_df(maxsize=1) +@_check_column_dtypes_diversity_serendipity +def user_serendipity( + train_df, + reco_df, + item_feature_df=None, + item_sim_measure=DEFAULT_ITEM_SIM_MEASURE, + col_item_features=DEFAULT_ITEM_FEATURES_COL, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_sim=DEFAULT_SIMILARITY_COL, + col_relevance=None, +): + """Calculate average serendipity for each user's recommendations. + + Args: + train_df (pandas.DataFrame): Data set with historical data for users and items they + have interacted with; contains col_user, col_item. Assumed to not contain any duplicate rows. + reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item, + col_relevance (optional). Assumed to not contain any duplicate user-item pairs. + item_feature_df (pandas.DataFrame): (Optional) It is required only when item_sim_measure='item_feature_vector'. + It contains two columns: col_item and features (a feature vector). + item_sim_measure (str): (Optional) This column indicates which item similarity measure to be used. + Available measures include item_cooccurrence_count (default choice) and item_feature_vector. + col_item_features (str): item feature column name. + col_user (str): User id column name. + col_item (str): Item id column name. + col_sim (str): This column indicates the column name for item similarity. + col_relevance (str): This column indicates whether the recommended item is actually + relevant to the user or not. + Returns: + pandas.DataFrame: A dataframe with following columns: col_user, user_serendipity. + """ + df_user_item_serendipity = user_item_serendipity( + train_df, + reco_df, + item_feature_df, + item_sim_measure, + col_item_features, + col_user, + col_item, + col_sim, + col_relevance, + ) + df_user_serendipity = ( + df_user_item_serendipity.groupby(col_user) + .agg({"user_item_serendipity": "mean"}) + .reset_index() + ) + df_user_serendipity.columns = [col_user, "user_serendipity"] + df_user_serendipity = df_user_serendipity.sort_values(col_user).reset_index( + drop=True + ) + + return df_user_serendipity
+ + +
[docs]@_check_column_dtypes_diversity_serendipity +def serendipity( + train_df, + reco_df, + item_feature_df=None, + item_sim_measure=DEFAULT_ITEM_SIM_MEASURE, + col_item_features=DEFAULT_ITEM_FEATURES_COL, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_sim=DEFAULT_SIMILARITY_COL, + col_relevance=None, +): + """Calculate average serendipity for recommendations across all users. + + Args: + train_df (pandas.DataFrame): Data set with historical data for users and items they + have interacted with; contains col_user, col_item. Assumed to not contain any duplicate rows. + reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item, + col_relevance (optional). Assumed to not contain any duplicate user-item pairs. + item_feature_df (pandas.DataFrame): (Optional) It is required only when item_sim_measure='item_feature_vector'. + It contains two columns: col_item and features (a feature vector). + item_sim_measure (str): (Optional) This column indicates which item similarity measure to be used. + Available measures include item_cooccurrence_count (default choice) and item_feature_vector. + col_item_features (str): item feature column name. + col_user (str): User id column name. + col_item (str): Item id column name. + col_sim (str): This column indicates the column name for item similarity. + col_relevance (str): This column indicates whether the recommended item is actually + relevant to the user or not. + Returns: + float: serendipity. + """ + df_user_serendipity = user_serendipity( + train_df, + reco_df, + item_feature_df, + item_sim_measure, + col_item_features, + col_user, + col_item, + col_sim, + col_relevance, + ) + avg_serendipity = df_user_serendipity.agg({"user_serendipity": "mean"})[0] + return avg_serendipity
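+
+# --- Editor's example (added sketch, not part of the original module) ---
+# Using the serendipity metrics with an explicit relevance column; the toy
+# data and the "relevance" column name are illustrative.
+def _example_serendipity_usage():
+    train = pd.DataFrame({"userID": [1, 1, 2, 2], "itemID": [1, 2, 1, 3]})
+    reco = pd.DataFrame(
+        {
+            "userID": [1, 1, 2, 2],
+            "itemID": [3, 4, 2, 4],
+            "relevance": [1.0, 0.0, 1.0, 1.0],
+        }
+    )
+    per_user = user_serendipity(train, reco, col_relevance="relevance")
+    overall = serendipity(train, reco, col_relevance="relevance")
+    return per_user, overall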
+ + +# Coverage metrics +
[docs]@_check_column_dtypes_novelty_coverage +def catalog_coverage( + train_df, reco_df, col_user=DEFAULT_USER_COL, col_item=DEFAULT_ITEM_COL +): + """Calculate catalog coverage for recommendations across all users. + The metric definition is based on the "catalog coverage" definition in the following reference: + + :Citation: + + G. Shani and A. Gunawardana, Evaluating Recommendation Systems, + Recommender Systems Handbook pp. 257-297, 2010. + + Args: + train_df (pandas.DataFrame): Data set with historical data for users and items they + have interacted with; contains col_user, col_item. Assumed to not contain any duplicate rows. + Interaction here follows the *item choice model* from Castells et al. + reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item, + col_relevance (optional). Assumed to not contain any duplicate user-item pairs. + col_user (str): User id column name. + col_item (str): Item id column name. + + Returns: + float: catalog coverage + """ + # distinct item count in reco_df + count_distinct_item_reco = reco_df[col_item].nunique() + # distinct item count in train_df + count_distinct_item_train = train_df[col_item].nunique() + + # catalog coverage + c_coverage = count_distinct_item_reco / count_distinct_item_train + return c_coverage
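+
+# --- Editor's note (added example, not part of the original module) ---
+# Illustrative: if train_df contains 4 distinct items and reco_df recommends
+# 2 distinct items, catalog_coverage = 2 / 4 = 0.5, i.e. half of the known
+# catalog ever gets recommended.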
+ + +
[docs]@_check_column_dtypes_novelty_coverage +def distributional_coverage( + train_df, reco_df, col_user=DEFAULT_USER_COL, col_item=DEFAULT_ITEM_COL +): + """Calculate distributional coverage for recommendations across all users. + The metric definition is based on formula (21) in the following reference: + + :Citation: + + G. Shani and A. Gunawardana, Evaluating Recommendation Systems, + Recommender Systems Handbook pp. 257-297, 2010. + + Args: + train_df (pandas.DataFrame): Data set with historical data for users and items they + have interacted with; contains col_user, col_item. Assumed to not contain any duplicate rows. + Interaction here follows the *item choice model* from Castells et al. + reco_df (pandas.DataFrame): Recommender's prediction output, containing col_user, col_item, + col_relevance (optional). Assumed to not contain any duplicate user-item pairs. + col_user (str): User id column name. + col_item (str): Item id column name. + + Returns: + float: distributional coverage + """ + # In reco_df, how many times each col_item is being recommended + df_itemcnt_reco = pd.DataFrame( + {"count": reco_df.groupby([col_item]).size()} + ).reset_index() + + # the number of total recommendations + count_row_reco = reco_df.shape[0] + + df_entropy = df_itemcnt_reco + df_entropy["p(i)"] = df_entropy["count"] / count_row_reco + df_entropy["entropy(i)"] = df_entropy["p(i)"] * np.log2(df_entropy["p(i)"]) + + d_coverage = -df_entropy.agg({"entropy(i)": "sum"})[0] + + return d_coverage
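+
+# --- Editor's note (added example, not part of the original module) ---
+# Distributional coverage is the Shannon entropy (in bits) of the recommended-
+# item distribution. Two illustrative extremes: recommendations spread evenly
+# over n distinct items give p(i) = 1/n and a coverage of log2(n), while
+# recommending a single item over and over gives a coverage of 0.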
\ No newline at end of file
diff --git a/_modules/recommenders/evaluation/spark_evaluation.html b/_modules/recommenders/evaluation/spark_evaluation.html
new file mode 100644
index 0000000000..4c820bab2c
--- /dev/null
+++ b/_modules/recommenders/evaluation/spark_evaluation.html
@@ -0,0 +1,1347 @@
+recommenders.evaluation.spark_evaluation — Recommenders documentation
Source code for recommenders.evaluation.spark_evaluation

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import numpy as np
+
+try:
+    from pyspark.mllib.evaluation import RegressionMetrics, RankingMetrics
+    from pyspark.sql import Window, DataFrame
+    from pyspark.sql.functions import col, row_number, expr
+    from pyspark.sql.functions import udf
+    import pyspark.sql.functions as F
+    from pyspark.sql.types import IntegerType, DoubleType, StructType, StructField
+    from pyspark.ml.linalg import VectorUDT
+except ImportError:
+    pass  # skip this import if we are in pure python environment
+
+from recommenders.utils.constants import (
+    DEFAULT_PREDICTION_COL,
+    DEFAULT_USER_COL,
+    DEFAULT_ITEM_COL,
+    DEFAULT_RATING_COL,
+    DEFAULT_RELEVANCE_COL,
+    DEFAULT_SIMILARITY_COL,
+    DEFAULT_ITEM_FEATURES_COL,
+    DEFAULT_ITEM_SIM_MEASURE,
+    DEFAULT_TIMESTAMP_COL,
+    DEFAULT_K,
+    DEFAULT_THRESHOLD,
+)
+
+
+
[docs]class SparkRatingEvaluation: + """Spark Rating Evaluator""" + + def __init__( + self, + rating_true, + rating_pred, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_rating=DEFAULT_RATING_COL, + col_prediction=DEFAULT_PREDICTION_COL, + ): + """Initializer. + + This is the Spark version of rating metrics evaluator. + The methods of this class, calculate rating metrics such as root mean squared error, mean absolute error, + R squared, and explained variance. + + Args: + rating_true (pyspark.sql.DataFrame): True labels. + rating_pred (pyspark.sql.DataFrame): Predicted labels. + col_user (str): column name for user. + col_item (str): column name for item. + col_rating (str): column name for rating. + col_prediction (str): column name for prediction. + """ + self.rating_true = rating_true + self.rating_pred = rating_pred + self.col_user = col_user + self.col_item = col_item + self.col_rating = col_rating + self.col_prediction = col_prediction + + # Check if inputs are Spark DataFrames. + if not isinstance(self.rating_true, DataFrame): + raise TypeError( + "rating_true should be but is not a Spark DataFrame" + ) # pragma : No Cover + + if not isinstance(self.rating_pred, DataFrame): + raise TypeError( + "rating_pred should be but is not a Spark DataFrame" + ) # pragma : No Cover + + # Check if columns exist. + true_columns = self.rating_true.columns + pred_columns = self.rating_pred.columns + + if rating_true.count() == 0: + raise ValueError("Empty input dataframe") + if rating_pred.count() == 0: + raise ValueError("Empty input dataframe") + + if self.col_user not in true_columns: + raise ValueError("Schema of rating_true not valid. Missing User Col") + if self.col_item not in true_columns: + raise ValueError("Schema of rating_true not valid. Missing Item Col") + if self.col_rating not in true_columns: + raise ValueError("Schema of rating_true not valid. Missing Rating Col") + + if self.col_user not in pred_columns: + raise ValueError( + "Schema of rating_pred not valid. Missing User Col" + ) # pragma : No Cover + if self.col_item not in pred_columns: + raise ValueError( + "Schema of rating_pred not valid. Missing Item Col" + ) # pragma : No Cover + if self.col_prediction not in pred_columns: + raise ValueError("Schema of rating_pred not valid. Missing Prediction Col") + + self.rating_true = self.rating_true.select( + col(self.col_user), + col(self.col_item), + col(self.col_rating).cast("double").alias("label"), + ) + self.rating_pred = self.rating_pred.select( + col(self.col_user), + col(self.col_item), + col(self.col_prediction).cast("double").alias("prediction"), + ) + + self.y_pred_true = ( + self.rating_true.join( + self.rating_pred, [self.col_user, self.col_item], "inner" + ) + .drop(self.col_user) + .drop(self.col_item) + ) + + self.metrics = RegressionMetrics( + self.y_pred_true.rdd.map(lambda x: (x.prediction, x.label)) + ) + +
[docs] def rmse(self): + """Calculate Root Mean Squared Error. + + Returns: + float: Root mean squared error. + """ + return self.metrics.rootMeanSquaredError
+ +
[docs] def mae(self): + """Calculate Mean Absolute Error. + + Returns: + float: Mean Absolute Error. + """ + return self.metrics.meanAbsoluteError
+ +
[docs] def rsquared(self): + """Calculate R squared. + + Returns: + float: R squared. + """ + return self.metrics.r2
+ +
[docs] def exp_var(self): + """Calculate explained variance. + + Note: + Spark MLLib's implementation is buggy (can lead to values > 1), hence we use var(). + + Returns: + float: Explained variance (min=0, max=1). + """ + var1 = self.y_pred_true.selectExpr("variance(label-prediction)").collect()[0][0] + var2 = self.y_pred_true.selectExpr("variance(label)").collect()[0][0] + + if var1 is None or var2 is None: + return -np.inf + else: + # numpy divide is more tolerant to var2 being zero + return 1 - np.divide(var1, var2)
+ + +
[docs]class SparkRankingEvaluation:
+    """Spark Ranking Evaluator"""
+
+    def __init__(
+        self,
+        rating_true,
+        rating_pred,
+        k=DEFAULT_K,
+        relevancy_method="top_k",
+        col_user=DEFAULT_USER_COL,
+        col_item=DEFAULT_ITEM_COL,
+        col_rating=DEFAULT_RATING_COL,
+        col_prediction=DEFAULT_PREDICTION_COL,
+        threshold=DEFAULT_THRESHOLD,
+    ):
+        """Initialization.
+        This is the Spark version of ranking metrics evaluator.
+        The methods of this class calculate ranking metrics such as precision@k,
+        recall@k, ndcg@k, and mean average precision.
+
+        The implementations of precision@k, ndcg@k, and mean average precision are referenced from Spark MLlib, which
+        can be found at `the link <https://spark.apache.org/docs/2.3.0/mllib-evaluation-metrics.html#ranking-systems>`_.
+
+        Args:
+            rating_true (pyspark.sql.DataFrame): DataFrame of true rating data (in the
+                format of customerID-itemID-rating tuple).
+            rating_pred (pyspark.sql.DataFrame): DataFrame of predicted rating data (in
+                the format of customerID-itemID-rating tuple).
+            col_user (str): column name for user.
+            col_item (str): column name for item.
+            col_rating (str): column name for rating.
+            col_prediction (str): column name for prediction.
+            k (int): number of items to recommend to each user.
+            relevancy_method (str): method for determining relevant items. Possible
+                values are "top_k", "by_time_stamp", and "by_threshold".
+            threshold (float): threshold for determining the relevant recommended items.
+                This is used for the case that predicted ratings follow a known
+                distribution. NOTE: this option is only activated if `relevancy_method` is
+                set to "by_threshold".
+        """
+        self.rating_true = rating_true
+        self.rating_pred = rating_pred
+        self.col_user = col_user
+        self.col_item = col_item
+        self.col_rating = col_rating
+        self.col_prediction = col_prediction
+        self.threshold = threshold
+
+        # Check if inputs are Spark DataFrames.
+        if not isinstance(self.rating_true, DataFrame):
+            raise TypeError(
+                "rating_true should be but is not a Spark DataFrame"
+            )  # pragma : No Cover
+
+        if not isinstance(self.rating_pred, DataFrame):
+            raise TypeError(
+                "rating_pred should be but is not a Spark DataFrame"
+            )  # pragma : No Cover
+
+        # Check if columns exist.
+        true_columns = self.rating_true.columns
+        pred_columns = self.rating_pred.columns
+
+        if self.col_user not in true_columns:
+            raise ValueError(
+                "Schema of rating_true not valid. Missing User Col: "
+                + str(true_columns)
+            )
+        if self.col_item not in true_columns:
+            raise ValueError("Schema of rating_true not valid. Missing Item Col")
+        if self.col_rating not in true_columns:
+            raise ValueError("Schema of rating_true not valid. Missing Rating Col")
+
+        if self.col_user not in pred_columns:
+            raise ValueError(
+                "Schema of rating_pred not valid. Missing User Col"
+            )  # pragma : No Cover
+        if self.col_item not in pred_columns:
+            raise ValueError(
+                "Schema of rating_pred not valid. Missing Item Col"
+            )  # pragma : No Cover
+        if self.col_prediction not in pred_columns:
+            raise ValueError("Schema of rating_pred not valid. Missing Prediction Col")
+
+        self.k = k
+
+        relevant_func = {
+            "top_k": _get_top_k_items,
+            "by_time_stamp": _get_relevant_items_by_timestamp,
+            "by_threshold": _get_relevant_items_by_threshold,
+        }
+
+        if relevancy_method not in relevant_func:
+            raise ValueError(
+                "relevancy_method should be one of {}".format(
+                    list(relevant_func.keys())
+                )
+            )
+
+        self.rating_pred = (
+            relevant_func[relevancy_method](
+                dataframe=self.rating_pred,
+                col_user=self.col_user,
+                col_item=self.col_item,
+                col_rating=self.col_prediction,
+                threshold=self.threshold,
+            )
+            if relevancy_method == "by_threshold"
+            else relevant_func[relevancy_method](
+                dataframe=self.rating_pred,
+                col_user=self.col_user,
+                col_item=self.col_item,
+                col_rating=self.col_prediction,
+                k=self.k,
+            )
+        )
+
+        self._metrics = self._calculate_metrics()
+
+    def _calculate_metrics(self):
+        """Calculate ranking metrics."""
+        self._items_for_user_pred = self.rating_pred
+
+        self._items_for_user_true = (
+            self.rating_true.groupBy(self.col_user)
+            .agg(expr("collect_list(" + self.col_item + ") as ground_truth"))
+            .select(self.col_user, "ground_truth")
+        )
+
+        self._items_for_user_all = self._items_for_user_pred.join(
+            self._items_for_user_true, on=self.col_user
+        ).drop(self.col_user)
+
+        return RankingMetrics(self._items_for_user_all.rdd)
+
[docs] def precision_at_k(self): + """Get precision@k. + + Note: + More details can be found + `on the precisionAt PySpark documentation <http://spark.apache.org/docs/3.0.0/api/python/pyspark.mllib.html#pyspark.mllib.evaluation.RankingMetrics.precisionAt>`_. + + Return: + float: precision at k (min=0, max=1) + """ + return self._metrics.precisionAt(self.k)
+ +
[docs] def recall_at_k(self): + """Get recall@K. + + Note: + More details can be found + `on the recallAt PySpark documentation <http://spark.apache.org/docs/3.0.0/api/python/pyspark.mllib.html#pyspark.mllib.evaluation.RankingMetrics.recallAt>`_. + + Return: + float: recall at k (min=0, max=1). + """ + return self._metrics.recallAt(self.k)
+ +
[docs] def ndcg_at_k(self): + """Get Normalized Discounted Cumulative Gain (NDCG) + + Note: + More details can be found + `on the ndcgAt PySpark documentation <http://spark.apache.org/docs/3.0.0/api/python/pyspark.mllib.html#pyspark.mllib.evaluation.RankingMetrics.ndcgAt>`_. + + Return: + float: nDCG at k (min=0, max=1). + """ + return self._metrics.ndcgAt(self.k)
+ +
[docs] def map(self): + """Get mean average precision. + + Return: + float: MAP (min=0, max=1). + """ + return self._metrics.meanAveragePrecision
+ +
[docs] def map_at_k(self): + """Get mean average precision at k. + + Note: + More details `on the meanAveragePrecision PySpark documentation <http://spark.apache.org/docs/3.0.0/api/python/pyspark.mllib.html#pyspark.mllib.evaluation.RankingMetrics.meanAveragePrecision>`_. + + Return: + float: MAP at k (min=0, max=1). + """ + return self._metrics.meanAveragePrecisionAt(self.k)
+ + +def _get_top_k_items( + dataframe, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_rating=DEFAULT_RATING_COL, + col_prediction=DEFAULT_PREDICTION_COL, + k=DEFAULT_K, +): + """Get the input customer-item-rating tuple in the format of Spark + DataFrame, output a Spark DataFrame in the dense format of top k items + for each user. + + Note: + if it is implicit rating, just append a column of constants to be ratings. + + Args: + dataframe (pyspark.sql.DataFrame): DataFrame of rating data (in the format of + customerID-itemID-rating tuple). + col_user (str): column name for user. + col_item (str): column name for item. + col_rating (str): column name for rating. + col_prediction (str): column name for prediction. + k (int): number of items for each user. + + Return: + pyspark.sql.DataFrame: DataFrame of top k items for each user. + """ + window_spec = Window.partitionBy(col_user).orderBy(col(col_rating).desc()) + + # this does not work for rating of the same value. + items_for_user = ( + dataframe.select( + col_user, col_item, col_rating, row_number().over(window_spec).alias("rank") + ) + .where(col("rank") <= k) + .groupby(col_user) + .agg(F.collect_list(col_item).alias(col_prediction)) + ) + + return items_for_user + + +def _get_relevant_items_by_threshold( + dataframe, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_rating=DEFAULT_RATING_COL, + col_prediction=DEFAULT_PREDICTION_COL, + threshold=DEFAULT_THRESHOLD, +): + """Get relevant items for each customer in the input rating data. + + Relevant items are defined as those having ratings above certain threshold. + The threshold is defined as a statistical measure of the ratings for a + user, e.g., median. + + Args: + dataframe: Spark DataFrame of customerID-itemID-rating tuples. + col_user (str): column name for user. + col_item (str): column name for item. + col_rating (str): column name for rating. + col_prediction (str): column name for prediction. + threshold (float): threshold for determining the relevant recommended items. + This is used for the case that predicted ratings follow a known + distribution. + + Return: + pyspark.sql.DataFrame: DataFrame of customerID-itemID-rating tuples with only relevant + items. + """ + items_for_user = ( + dataframe.orderBy(col_rating, ascending=False) + .where(col_rating + " >= " + str(threshold)) + .select(col_user, col_item, col_rating) + .withColumn( + col_prediction, F.collect_list(col_item).over(Window.partitionBy(col_user)) + ) + .select(col_user, col_prediction) + .dropDuplicates() + ) + + return items_for_user + + +def _get_relevant_items_by_timestamp( + dataframe, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_rating=DEFAULT_RATING_COL, + col_timestamp=DEFAULT_TIMESTAMP_COL, + col_prediction=DEFAULT_PREDICTION_COL, + k=DEFAULT_K, +): + """Get relevant items for each customer defined by timestamp. + + Relevant items are defined as k items that appear mostly recently + according to timestamps. + + Args: + dataframe (pyspark.sql.DataFrame): A Spark DataFrame of customerID-itemID-rating-timeStamp + tuples. + col_user (str): column name for user. + col_item (str): column name for item. + col_rating (str): column name for rating. + col_timestamp (str): column name for timestamp. + col_prediction (str): column name for prediction. + k: number of relevant items to be filtered by the function. + + Return: + pyspark.sql.DataFrame: DataFrame of customerID-itemID-rating tuples with only relevant items. 
+ """ + window_spec = Window.partitionBy(col_user).orderBy(col(col_timestamp).desc()) + + items_for_user = ( + dataframe.select( + col_user, col_item, col_rating, row_number().over(window_spec).alias("rank") + ) + .where(col("rank") <= k) + .withColumn( + col_prediction, F.collect_list(col_item).over(Window.partitionBy(col_user)) + ) + .select(col_user, col_prediction) + .dropDuplicates([col_user, col_prediction]) + ) + + return items_for_user + + +
[docs]class SparkDiversityEvaluation: + """Spark Evaluator for diversity, coverage, novelty, serendipity""" + + def __init__( + self, + train_df, + reco_df, + item_feature_df=None, + item_sim_measure=DEFAULT_ITEM_SIM_MEASURE, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_relevance=None, + ): + """Initializer. + + This is the Spark version of diversity metrics evaluator. + The methods of this class calculate the following diversity metrics: + + * Coverage - it includes two metrics: + 1. catalog_coverage, which measures the proportion of items that get recommended from the item catalog; + 2. distributional_coverage, which measures how unequally different items are recommended in the + recommendations to all users. + * Novelty - A more novel item indicates it is less popular, i.e. it gets recommended less frequently. + * Diversity - The dissimilarity of items being recommended. + * Serendipity - The "unusualness" or "surprise" of recommendations to a user. When 'col_relevance' is used, + it indicates how "pleasant surprise" of recommendations is to a user. + + The metric definitions/formulations are based on the following references with modification: + + :Citation: + + G. Shani and A. Gunawardana, Evaluating Recommendation Systems, + Recommender Systems Handbook pp. 257-297, 2010. + + Y.C. Zhang, D.Ó. Séaghdha, D. Quercia and T. Jambor, Auralist: introducing + serendipity into music recommendation, WSDM 2012 + + P. Castells, S. Vargas, and J. Wang, Novelty and diversity metrics for recommender systems: + choice, discovery and relevance, ECIR 2011 + + Eugene Yan, Serendipity: Accuracy's unpopular best friend in Recommender Systems, + eugeneyan.com, April 2020 + + Args: + train_df (pyspark.sql.DataFrame): Data set with historical data for users and items they + have interacted with; contains col_user, col_item. Assumed to not contain any duplicate rows. + Interaction here follows the *item choice model* from Castells et al. + reco_df (pyspark.sql.DataFrame): Recommender's prediction output, containing col_user, col_item, + col_relevance (optional). Assumed to not contain any duplicate user-item pairs. + item_feature_df (pyspark.sql.DataFrame): (Optional) It is required only when item_sim_measure='item_feature_vector'. + It contains two columns: col_item and features (a feature vector). + item_sim_measure (str): (Optional) This column indicates which item similarity measure to be used. + Available measures include item_cooccurrence_count (default choice) and item_feature_vector. + col_user (str): User id column name. + col_item (str): Item id column name. + col_relevance (str): Optional. This column indicates whether the recommended item is actually + relevant to the user or not. 
+ """ + + self.train_df = train_df.select(col_user, col_item) + self.col_user = col_user + self.col_item = col_item + self.sim_col = DEFAULT_SIMILARITY_COL + self.df_cosine_similarity = None + self.df_user_item_serendipity = None + self.df_user_serendipity = None + self.avg_serendipity = None + self.df_item_novelty = None + self.avg_novelty = None + self.df_intralist_similarity = None + self.df_user_diversity = None + self.avg_diversity = None + self.item_feature_df = item_feature_df + self.item_sim_measure = item_sim_measure + + if col_relevance is None: + self.col_relevance = DEFAULT_RELEVANCE_COL + # relevance term, default is 1 (relevant) for all + self.reco_df = reco_df.select( + col_user, col_item, F.lit(1.0).alias(self.col_relevance) + ) + else: + self.col_relevance = col_relevance + self.reco_df = reco_df.select( + col_user, col_item, F.col(self.col_relevance).cast(DoubleType()) + ) + + if self.item_sim_measure == "item_feature_vector": + self.col_item_features = DEFAULT_ITEM_FEATURES_COL + required_schema = StructType( + ( + StructField(self.col_item, IntegerType()), + StructField(self.col_item_features, VectorUDT()), + ) + ) + if self.item_feature_df is not None: + if str(required_schema) != str(item_feature_df.schema): + raise Exception( + "Incorrect schema! item_feature_df should have schema " + f"{str(required_schema)} but have {str(item_feature_df.schema)}" + ) + else: + raise Exception( + "item_feature_df not specified! item_feature_df must be provided " + "if choosing to use item_feature_vector to calculate item similarity. " + f"item_feature_df should have schema {str(required_schema)}" + ) + + # check if reco_df contains any user_item pairs that are already shown in train_df + count_intersection = ( + self.train_df.select(self.col_user, self.col_item) + .intersect(self.reco_df.select(self.col_user, self.col_item)) + .count() + ) + + if count_intersection != 0: + raise Exception( + "reco_df should not contain any user_item pairs that are already shown in train_df" + ) + + def _get_pairwise_items(self, df): + """Get pairwise combinations of items per user (ignoring duplicate pairs [1,2] == [2,1])""" + return ( + df.select(self.col_user, F.col(self.col_item).alias("i1")) + .join( + df.select( + F.col(self.col_user).alias("_user"), + F.col(self.col_item).alias("i2"), + ), + (F.col(self.col_user) == F.col("_user")) & (F.col("i1") <= F.col("i2")), + ) + .select(self.col_user, "i1", "i2") + ) + + def _get_cosine_similarity(self, n_partitions=200): + if self.item_sim_measure == "item_cooccurrence_count": + # calculate item-item similarity based on item co-occurrence count + self._get_cooccurrence_similarity(n_partitions) + elif self.item_sim_measure == "item_feature_vector": + # calculate item-item similarity based on item feature vectors + self._get_item_feature_similarity(n_partitions) + else: + raise Exception( + "item_sim_measure not recognized! The available options include 'item_cooccurrence_count' and 'item_feature_vector'." + ) + return self.df_cosine_similarity + + def _get_cooccurrence_similarity(self, n_partitions): + """Cosine similarity metric from + + :Citation: + + Y.C. Zhang, D.Ó. Séaghdha, D. Quercia and T. Jambor, Auralist: + introducing serendipity into music recommendation, WSDM 2012 + + The item indexes in the result are such that i1 <= i2. 
+ """ + if self.df_cosine_similarity is None: + pairs = self._get_pairwise_items(df=self.train_df) + item_count = self.train_df.groupBy(self.col_item).count() + + self.df_cosine_similarity = ( + pairs.groupBy("i1", "i2") + .count() + .join( + item_count.select( + F.col(self.col_item).alias("i1"), + F.pow(F.col("count"), 0.5).alias("i1_sqrt_count"), + ), + on="i1", + ) + .join( + item_count.select( + F.col(self.col_item).alias("i2"), + F.pow(F.col("count"), 0.5).alias("i2_sqrt_count"), + ), + on="i2", + ) + .select( + "i1", + "i2", + ( + F.col("count") + / (F.col("i1_sqrt_count") * F.col("i2_sqrt_count")) + ).alias(self.sim_col), + ) + .repartition(n_partitions, "i1", "i2") + ) + return self.df_cosine_similarity + + @staticmethod + @udf(returnType=DoubleType()) + def sim_cos(v1, v2): + p = 2 + return float(v1.dot(v2)) / float(v1.norm(p) * v2.norm(p)) + + def _get_item_feature_similarity(self, n_partitions): + """Cosine similarity metric based on item feature vectors + + The item indexes in the result are such that i1 <= i2. + """ + if self.df_cosine_similarity is None: + self.df_cosine_similarity = ( + self.item_feature_df.select( + F.col(self.col_item).alias("i1"), + F.col(self.col_item_features).alias("f1"), + ) + .join( + self.item_feature_df.select( + F.col(self.col_item).alias("i2"), + F.col(self.col_item_features).alias("f2"), + ), + (F.col("i1") <= F.col("i2")), + ) + .select("i1", "i2", self.sim_cos("f1", "f2").alias("sim")) + .sort("i1", "i2") + .repartition(n_partitions, "i1", "i2") + ) + return self.df_cosine_similarity + + # Diversity metrics + def _get_intralist_similarity(self, df): + """Intra-list similarity from + + :Citation: + + "Improving Recommendation Lists Through Topic Diversification", + Ziegler, McNee, Konstan and Lausen, 2005. + """ + if self.df_intralist_similarity is None: + pairs = self._get_pairwise_items(df=df) + similarity_df = self._get_cosine_similarity() + # Fillna(0) is needed in the cases where similarity_df does not have an entry for a pair of items. + # e.g. i1 and i2 have never occurred together. + self.df_intralist_similarity = ( + pairs.join(similarity_df, on=["i1", "i2"], how="left") + .fillna(0) + .filter(F.col("i1") != F.col("i2")) + .groupBy(self.col_user) + .agg(F.mean(self.sim_col).alias("avg_il_sim")) + .select(self.col_user, "avg_il_sim") + ) + return self.df_intralist_similarity + +
[docs] def user_diversity(self): + """Calculate average diversity of recommendations for each user. + The metric definition is based on formula (3) in the following reference: + + :Citation: + + Y.C. Zhang, D.Ó. Séaghdha, D. Quercia and T. Jambor, Auralist: + introducing serendipity into music recommendation, WSDM 2012 + + Returns: + pyspark.sql.dataframe.DataFrame: A dataframe with the following columns: col_user, user_diversity. + """ + if self.df_user_diversity is None: + self.df_intralist_similarity = self._get_intralist_similarity(self.reco_df) + self.df_user_diversity = ( + self.df_intralist_similarity.withColumn( + "user_diversity", 1 - F.col("avg_il_sim") + ) + .select(self.col_user, "user_diversity") + .orderBy(self.col_user) + ) + return self.df_user_diversity
+ +
[docs] def diversity(self): + """Calculate average diversity of recommendations across all users. + + Returns: + float: diversity. + """ + if self.avg_diversity is None: + self.df_user_diversity = self.user_diversity() + self.avg_diversity = self.df_user_diversity.agg( + {"user_diversity": "mean"} + ).first()[0] + return self.avg_diversity
+ + # Novelty metrics +
[docs] def historical_item_novelty(self): + """Calculate novelty for each item. Novelty is computed as the minus logarithm of + (number of interactions with item / total number of interactions). The definition of the metric + is based on the following reference using the choice model (eqs. 1 and 6): + + :Citation: + + P. Castells, S. Vargas, and J. Wang, Novelty and diversity metrics for recommender systems: + choice, discovery and relevance, ECIR 2011 + + The novelty of an item can be defined relative to a set of observed events on the set of all items. + These can be events of user choice (item "is picked" by a random user) or user discovery + (item "is known" to a random user). The above definition of novelty reflects a factor of item popularity. + High novelty values correspond to long-tail items in the density function, that few users have interacted + with and low novelty values correspond to popular head items. + + Returns: + pyspark.sql.dataframe.DataFrame: A dataframe with the following columns: col_item, item_novelty. + """ + if self.df_item_novelty is None: + n_records = self.train_df.count() + self.df_item_novelty = ( + self.train_df.groupBy(self.col_item) + .count() + .withColumn("item_novelty", -F.log2(F.col("count") / n_records)) + .select(self.col_item, "item_novelty") + .orderBy(self.col_item) + ) + return self.df_item_novelty
+ +
[docs] def novelty(self): + """Calculate the average novelty in a list of recommended items (this assumes that the recommendation list + is already computed). Follows section 5 from + + :Citation: + + P. Castells, S. Vargas, and J. Wang, Novelty and diversity metrics for recommender systems: + choice, discovery and relevance, ECIR 2011 + + Returns: + pyspark.sql.dataframe.DataFrame: A dataframe with following columns: novelty. + """ + if self.avg_novelty is None: + self.df_item_novelty = self.historical_item_novelty() + n_recommendations = self.reco_df.count() + self.avg_novelty = ( + self.reco_df.groupBy(self.col_item) + .count() + .join(self.df_item_novelty, self.col_item) + .selectExpr("sum(count * item_novelty)") + .first()[0] + / n_recommendations + ) + return self.avg_novelty
+ + # Serendipity metrics +
[docs] def user_item_serendipity(self): + """Calculate serendipity of each item in the recommendations for each user. + The metric definition is based on the following references: + + :Citation: + + Y.C. Zhang, D.Ó. Séaghdha, D. Quercia and T. Jambor, Auralist: + introducing serendipity into music recommendation, WSDM 2012 + + Eugene Yan, Serendipity: Accuracy’s unpopular best friend in Recommender Systems, + eugeneyan.com, April 2020 + + Returns: + pyspark.sql.dataframe.DataFrame: A dataframe with columns: col_user, col_item, user_item_serendipity. + """ + # for every col_user, col_item in reco_df, join all interacted items from train_df. + # These interacted items are repeated for each item in reco_df for a specific user. + if self.df_user_item_serendipity is None: + self.df_cosine_similarity = self._get_cosine_similarity() + self.df_user_item_serendipity = ( + self.reco_df.select( + self.col_user, + self.col_item, + F.col(self.col_item).alias( + "reco_item_tmp" + ), # duplicate col_item to keep + ) + .join( + self.train_df.select( + self.col_user, F.col(self.col_item).alias("train_item_tmp") + ), + on=[self.col_user], + ) + .select( + self.col_user, + self.col_item, + F.least(F.col("reco_item_tmp"), F.col("train_item_tmp")).alias( + "i1" + ), + F.greatest(F.col("reco_item_tmp"), F.col("train_item_tmp")).alias( + "i2" + ), + ) + .join(self.df_cosine_similarity, on=["i1", "i2"], how="left") + .fillna(0) + .groupBy(self.col_user, self.col_item) + .agg(F.mean(self.sim_col).alias("avg_item2interactedHistory_sim")) + .join(self.reco_df, on=[self.col_user, self.col_item]) + .withColumn( + "user_item_serendipity", + (1 - F.col("avg_item2interactedHistory_sim")) + * F.col(self.col_relevance), + ) + .select(self.col_user, self.col_item, "user_item_serendipity") + .orderBy(self.col_user, self.col_item) + ) + return self.df_user_item_serendipity
+ +
[docs] def user_serendipity(self): + """Calculate average serendipity for each user's recommendations. + + Returns: + pyspark.sql.dataframe.DataFrame: A dataframe with following columns: col_user, user_serendipity. + """ + if self.df_user_serendipity is None: + self.df_user_item_serendipity = self.user_item_serendipity() + self.df_user_serendipity = ( + self.df_user_item_serendipity.groupBy(self.col_user) + .agg(F.mean("user_item_serendipity").alias("user_serendipity")) + .orderBy(self.col_user) + ) + return self.df_user_serendipity
+ +
[docs] def serendipity(self): + """Calculate average serendipity for recommendations across all users. + + Returns: + float: serendipity. + """ + if self.avg_serendipity is None: + self.df_user_serendipity = self.user_serendipity() + self.avg_serendipity = self.df_user_serendipity.agg( + {"user_serendipity": "mean"} + ).first()[0] + return self.avg_serendipity
+ + # Coverage metrics +
[docs] def catalog_coverage(self): + """Calculate catalog coverage for recommendations across all users. + The metric definition is based on the "catalog coverage" definition in the following reference: + + :Citation: + + G. Shani and A. Gunawardana, Evaluating Recommendation Systems, + Recommender Systems Handbook pp. 257-297, 2010. + + Returns: + float: catalog coverage + """ + # distinct item count in reco_df + count_distinct_item_reco = self.reco_df.select(self.col_item).distinct().count() + # distinct item count in train_df + count_distinct_item_train = ( + self.train_df.select(self.col_item).distinct().count() + ) + + # catalog coverage + c_coverage = count_distinct_item_reco / count_distinct_item_train + return c_coverage
+ +
[docs] def distributional_coverage(self): + """Calculate distributional coverage for recommendations across all users. + The metric definition is based on formula (21) in the following reference: + + :Citation: + + G. Shani and A. Gunawardana, Evaluating Recommendation Systems, + Recommender Systems Handbook pp. 257-297, 2010. + + Returns: + float: distributional coverage + """ + # In reco_df, how many times each col_item is being recommended + df_itemcnt_reco = self.reco_df.groupBy(self.col_item).count() + + # the number of total recommendations + count_row_reco = self.reco_df.count() + df_entropy = df_itemcnt_reco.withColumn( + "p(i)", F.col("count") / count_row_reco + ).withColumn("entropy(i)", F.col("p(i)") * F.log2(F.col("p(i)"))) + # distributional coverage + d_coverage = -df_entropy.agg(F.sum("entropy(i)")).collect()[0][0] + + return d_coverage
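+
+# --- Editor's example (added sketch, not part of the original module) ---
+# Computing the beyond-accuracy metrics with the default co-occurrence
+# similarity; assumes an active SparkSession `spark`. Note that reco_df must
+# not repeat any user-item pair from train_df. Toy rows are illustrative.
+def _example_diversity_evaluation(spark):
+    train_df = spark.createDataFrame(
+        [(1, 1), (1, 2), (2, 1), (2, 3)], ["userID", "itemID"]
+    )
+    reco_df = spark.createDataFrame(
+        [(1, 3), (1, 4), (2, 2), (2, 4)], ["userID", "itemID"]
+    )
+    evaluator = SparkDiversityEvaluation(train_df=train_df, reco_df=reco_df)
+    return (
+        evaluator.diversity(),
+        evaluator.novelty(),
+        evaluator.serendipity(),
+        evaluator.catalog_coverage(),
+        evaluator.distributional_coverage(),
+    )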
\ No newline at end of file
diff --git a/_modules/recommenders/models/cornac/cornac_utils.html b/_modules/recommenders/models/cornac/cornac_utils.html
new file mode 100644
index 0000000000..f2d3475be5
--- /dev/null
+++ b/_modules/recommenders/models/cornac/cornac_utils.html
@@ -0,0 +1,485 @@
+recommenders.models.cornac.cornac_utils — Recommenders documentation
Source code for recommenders.models.cornac.cornac_utils

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import pandas as pd
+import numpy as np
+
+from recommenders.utils.constants import (
+    DEFAULT_USER_COL,
+    DEFAULT_ITEM_COL,
+    DEFAULT_PREDICTION_COL,
+)
+
+
+
[docs]def predict( + model, + data, + usercol=DEFAULT_USER_COL, + itemcol=DEFAULT_ITEM_COL, + predcol=DEFAULT_PREDICTION_COL, +): + """Computes predictions of a recommender model from Cornac on the data. + Can be used for computing rating metrics like RMSE. + + Args: + model (cornac.models.Recommender): A recommender model from Cornac + data (pandas.DataFrame): The data on which to predict + usercol (str): Name of the user column + itemcol (str): Name of the item column + + Returns: + pandas.DataFrame: Dataframe with usercol, itemcol, predcol + """ + uid_map = model.train_set.uid_map + iid_map = model.train_set.iid_map + predictions = [ + [ + getattr(row, usercol), + getattr(row, itemcol), + model.rate( + user_idx=uid_map.get(getattr(row, usercol), len(uid_map)), + item_idx=iid_map.get(getattr(row, itemcol), len(iid_map)), + ), + ] + for row in data.itertuples() + ] + predictions = pd.DataFrame(data=predictions, columns=[usercol, itemcol, predcol]) + return predictions
+ + +
[docs]def predict_ranking( + model, + data, + usercol=DEFAULT_USER_COL, + itemcol=DEFAULT_ITEM_COL, + predcol=DEFAULT_PREDICTION_COL, + remove_seen=False, +): + """Computes predictions of recommender model from Cornac on all users and items in data. + It can be used for computing ranking metrics like NDCG. + + Args: + model (cornac.models.Recommender): A recommender model from Cornac + data (pandas.DataFrame): The data from which to get the users and items + usercol (str): Name of the user column + itemcol (str): Name of the item column + remove_seen (bool): Flag to remove (user, item) pairs seen in the training data + + Returns: + pandas.DataFrame: Dataframe with usercol, itemcol, predcol + """ + users, items, preds = [], [], [] + item = list(model.train_set.iid_map.keys()) + for uid, user_idx in model.train_set.uid_map.items(): + user = [uid] * len(item) + users.extend(user) + items.extend(item) + preds.extend(model.score(user_idx).tolist()) + + all_predictions = pd.DataFrame( + data={usercol: users, itemcol: items, predcol: preds} + ) + + if remove_seen: + tempdf = pd.concat( + [ + data[[usercol, itemcol]], + pd.DataFrame( + data=np.ones(data.shape[0]), columns=["dummycol"], index=data.index + ), + ], + axis=1, + ) + merged = pd.merge(tempdf, all_predictions, on=[usercol, itemcol], how="outer") + return merged[merged["dummycol"].isnull()].drop("dummycol", axis=1) + else: + return all_predictions
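+
+# --- Editor's example (added sketch, not part of the original module) ---
+# Scoring a trained Cornac model; assumes `train` is a pandas.DataFrame with
+# columns userID, itemID, rating (in that order). The BPR hyperparameters are
+# illustrative.
+def _example_cornac_usage(train):
+    import cornac
+
+    # build a Cornac dataset from plain (user, item, rating) tuples
+    train_set = cornac.data.Dataset.from_uir(
+        list(train.itertuples(index=False, name=None)), seed=42
+    )
+    bpr = cornac.models.BPR(k=10, max_iter=100, seed=42).fit(train_set)
+    # rating-style predictions for the given pairs (for metrics like RMSE)
+    rating_preds = predict(bpr, train)
+    # scores for all user-item pairs, dropping those seen during training
+    topk_candidates = predict_ranking(bpr, train, remove_seen=True)
+    return rating_preds, topk_candidates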
\ No newline at end of file
diff --git a/_modules/recommenders/models/deeprec/DataModel/ImplicitCF.html b/_modules/recommenders/models/deeprec/DataModel/ImplicitCF.html
new file mode 100644
index 0000000000..4ac346f4d9
--- /dev/null
+++ b/_modules/recommenders/models/deeprec/DataModel/ImplicitCF.html
@@ -0,0 +1,616 @@
+recommenders.models.deeprec.DataModel.ImplicitCF — Recommenders documentation
Source code for recommenders.models.deeprec.DataModel.ImplicitCF

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import random
+import numpy as np
+import pandas as pd
+import scipy.sparse as sp
+
+from recommenders.utils.constants import (
+    DEFAULT_ITEM_COL,
+    DEFAULT_USER_COL,
+    DEFAULT_RATING_COL,
+    DEFAULT_PREDICTION_COL,
+)
+
+
+
[docs]class ImplicitCF(object):
+    """Data processing class for GCN models which use implicit feedback.
+
+    Initialize train and test set, create normalized adjacency matrix and sample data for training epochs.
+
+    """
+
+    def __init__(
+        self,
+        train,
+        test=None,
+        adj_dir=None,
+        col_user=DEFAULT_USER_COL,
+        col_item=DEFAULT_ITEM_COL,
+        col_rating=DEFAULT_RATING_COL,
+        col_prediction=DEFAULT_PREDICTION_COL,
+        seed=None,
+    ):
+        """Constructor
+
+        Args:
+            train (pandas.DataFrame): Training data with at least columns (col_user, col_item, col_rating).
+            test (pandas.DataFrame): Test data with at least columns (col_user, col_item, col_rating).
+                test can be None; if so, only the training data is processed.
+            adj_dir (str): Directory to save / load adjacency matrices. If it is None, adjacency
+                matrices will be created and will not be saved.
+            col_user (str): User column name.
+            col_item (str): Item column name.
+            col_rating (str): Rating column name.
+            col_prediction (str): Prediction column name.
+            seed (int): Seed.
+
+        """
+        self.user_idx = None
+        self.item_idx = None
+        self.adj_dir = adj_dir
+        self.col_user = col_user
+        self.col_item = col_item
+        self.col_rating = col_rating
+        self.col_prediction = col_prediction
+        self.train, self.test = self._data_processing(train, test)
+        self._init_train_data()
+
+        random.seed(seed)
+
+    def _data_processing(self, train, test):
+        """Process the dataset to reindex userID and itemID and only keep records with ratings greater than 0.
+
+        Args:
+            train (pandas.DataFrame): Training data with at least columns (col_user, col_item, col_rating).
+            test (pandas.DataFrame): Test data with at least columns (col_user, col_item, col_rating).
+                test can be None; if so, only the training data is processed.
+
+        Returns:
+            tuple: Reindexed and filtered train and test pandas.DataFrame datasets.
+
+        """
+        df = (
+            train
+            if test is None
+            else pd.concat([train, test], axis=0, ignore_index=True)
+        )
+
+        if self.user_idx is None:
+            user_idx = df[[self.col_user]].drop_duplicates()
+            user_idx[self.col_user + "_idx"] = np.arange(len(user_idx))
+            self.n_users = len(user_idx)
+            self.user_idx = user_idx
+
+            self.user2id = dict(
+                zip(user_idx[self.col_user], user_idx[self.col_user + "_idx"])
+            )
+            self.id2user = dict(
+                zip(user_idx[self.col_user + "_idx"], user_idx[self.col_user])
+            )
+
+        if self.item_idx is None:
+            item_idx = df[[self.col_item]].drop_duplicates()
+            item_idx[self.col_item + "_idx"] = np.arange(len(item_idx))
+            self.n_items = len(item_idx)
+            self.item_idx = item_idx
+
+            self.item2id = dict(
+                zip(item_idx[self.col_item], item_idx[self.col_item + "_idx"])
+            )
+            self.id2item = dict(
+                zip(item_idx[self.col_item + "_idx"], item_idx[self.col_item])
+            )
+
+        return self._reindex(train), self._reindex(test)
+
+    def _reindex(self, df):
+        """Reindex userID and itemID in a dataframe and only keep records with ratings greater than 0.
+
+        Args:
+            df (pandas.DataFrame): dataframe with at least columns (col_user, col_item, col_rating).
+
+        Returns:
+            pandas.DataFrame: Reindexed and filtered dataframe.
+
+        """
+        if df is None:
+            return None
+
+        df = pd.merge(df, self.user_idx, on=self.col_user, how="left")
+        df = pd.merge(df, self.item_idx, on=self.col_item, how="left")
+
+        df = df[df[self.col_rating] > 0]
+
+        df_reindex = df[
+            [self.col_user + "_idx", self.col_item + "_idx", self.col_rating]
+        ]
+        df_reindex.columns = [self.col_user, self.col_item, self.col_rating]
+
+        return df_reindex
+
+    def _init_train_data(self):
+        """Record the items each user has interacted with in a dataframe self.interact_status, and create the
+        adjacency matrix self.R.
+
+        """
+        self.interact_status = (
+            self.train.groupby(self.col_user)[self.col_item]
+            .apply(set)
+            .reset_index()
+            .rename(columns={self.col_item: self.col_item + "_interacted"})
+        )
+        self.R = sp.dok_matrix((self.n_users, self.n_items), dtype=np.float32)
+        self.R[self.train[self.col_user], self.train[self.col_item]] = 1.0
+
[docs]    def get_norm_adj_mat(self):
+        """Load normalized adjacency matrix if it exists, otherwise create (and save) it.
+
+        Returns:
+            scipy.sparse.csr_matrix: Normalized adjacency matrix.
+
+        """
+        try:
+            if self.adj_dir is None:
+                raise FileNotFoundError
+            norm_adj_mat = sp.load_npz(self.adj_dir + "/norm_adj_mat.npz")
+            print("Loaded normalized adjacency matrix.")
+
+        except FileNotFoundError:
+            norm_adj_mat = self.create_norm_adj_mat()
+            if self.adj_dir is not None:
+                sp.save_npz(self.adj_dir + "/norm_adj_mat.npz", norm_adj_mat)
+        return norm_adj_mat
+ +
[docs]    def create_norm_adj_mat(self):
+        """Create normalized adjacency matrix.
+
+        Returns:
+            scipy.sparse.csr_matrix: Normalized adjacency matrix.
+
+        """
+        adj_mat = sp.dok_matrix(
+            (self.n_users + self.n_items, self.n_users + self.n_items), dtype=np.float32
+        )
+        adj_mat = adj_mat.tolil()
+        R = self.R.tolil()
+
+        adj_mat[: self.n_users, self.n_users :] = R
+        adj_mat[self.n_users :, : self.n_users] = R.T
+        adj_mat = adj_mat.todok()
+        print("Created adjacency matrix.")
+
+        rowsum = np.array(adj_mat.sum(1))
+        d_inv = np.power(rowsum + 1e-9, -0.5).flatten()
+        d_inv[np.isinf(d_inv)] = 0.0
+        d_mat_inv = sp.diags(d_inv)
+        norm_adj_mat = d_mat_inv.dot(adj_mat)
+        norm_adj_mat = norm_adj_mat.dot(d_mat_inv)
+        print("Normalized adjacency matrix.")
+
+        return norm_adj_mat.tocsr()
+ +
[docs] def train_loader(self, batch_size): + """Sample train data every batch. One positive item and one negative item sampled for each user. + + Args: + batch_size (int): Batch size of users. + + Returns: + numpy.ndarray, numpy.ndarray, numpy.ndarray: + - Sampled users. + - Sampled positive items. + - Sampled negative items. + """ + + def sample_neg(x): + while True: + neg_id = random.randint(0, self.n_items - 1) + if neg_id not in x: + return neg_id + + indices = range(self.n_users) + if self.n_users < batch_size: + users = [random.choice(indices) for _ in range(batch_size)] + else: + users = random.sample(indices, batch_size) + + interact = self.interact_status.iloc[users] + pos_items = interact[self.col_item + "_interacted"].apply( + lambda x: random.choice(list(x)) + ) + neg_items = interact[self.col_item + "_interacted"].apply( + lambda x: sample_neg(x) + ) + + return np.array(users), np.array(pos_items), np.array(neg_items)
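
As an end-to-end illustration of how this class fits together, here is a minimal usage sketch; the toy dataframe, column values, and batch size are invented for the example and are not part of the module above:

    import pandas as pd
    from recommenders.models.deeprec.DataModel.ImplicitCF import ImplicitCF

    # Four implicit-feedback interactions over three users and three items (hypothetical).
    train = pd.DataFrame(
        {"userID": [1, 1, 2, 3], "itemID": [10, 11, 10, 12], "rating": [1.0, 1.0, 1.0, 1.0]}
    )

    data = ImplicitCF(train=train, seed=42)  # reindexes IDs and builds the interaction matrix R
    norm_adj = data.get_norm_adj_mat()       # D^-1/2 * A * D^-1/2, returned as a scipy CSR matrix
    users, pos_items, neg_items = data.train_loader(batch_size=2)  # one (u, i+, i-) triple per sampled user

Note that `train_loader` assumes every sampled user has at least one item they have not interacted with; otherwise the negative-sampling loop in `sample_neg` would not terminate.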
\ No newline at end of file
diff --git a/_modules/recommenders/models/deeprec/deeprec_utils.html b/_modules/recommenders/models/deeprec/deeprec_utils.html
new file mode 100644
index 0000000000..e402173005
--- /dev/null
+++ b/_modules/recommenders/models/deeprec/deeprec_utils.html
@@ -0,0 +1,1005 @@

Source code for recommenders.models.deeprec.deeprec_utils

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+
+import os
+from sklearn.metrics import (
+    roc_auc_score,
+    log_loss,
+    mean_squared_error,
+    accuracy_score,
+    f1_score,
+)
+import numpy as np
+import yaml
+import zipfile
+import pickle as pkl
+
+from recommenders.datasets.download_utils import maybe_download
+
+
+
[docs]def flat_config(config):
+    """Flatten a configuration loaded from a YAML file into a flat dict.
+
+    Args:
+        config (dict): Configuration loaded from a yaml file.
+
+    Returns:
+        dict: Flattened configuration dictionary.
+    """
+    f_config = {}
+    category = config.keys()
+    for cate in category:
+        for key, val in config[cate].items():
+            f_config[key] = val
+    return f_config
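
For instance, `flat_config` collapses the two-level layout of the YAML configs into a single namespace; the keys below are invented purely to show the shape:

    nested = {"data": {"doc_size": 10}, "model": {"dim": 32, "layer_sizes": [100, 100]}}
    flat_config(nested)
    # {'doc_size': 10, 'dim': 32, 'layer_sizes': [100, 100]}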
+ + +
[docs]def check_type(config):
+    """Check that the config parameters are the correct type.
+
+    Args:
+        config (dict): Configuration dictionary.
+
+    Raises:
+        TypeError: If the parameters are not the correct type.
+    """
+
+    int_parameters = [
+        "word_size",
+        "entity_size",
+        "doc_size",
+        "history_size",
+        "FEATURE_COUNT",
+        "FIELD_COUNT",
+        "dim",
+        "epochs",
+        "batch_size",
+        "show_step",
+        "save_epoch",
+        "PAIR_NUM",
+        "DNN_FIELD_NUM",
+        "attention_layer_sizes",
+        "n_user",
+        "n_item",
+        "n_user_attr",
+        "n_item_attr",
+        "item_embedding_dim",
+        "cate_embedding_dim",
+        "user_embedding_dim",
+        "max_seq_length",
+        "hidden_size",
+        "T",
+        "L",
+        "n_v",
+        "n_h",
+        "kernel_size",
+        "min_seq_length",
+        "attention_size",
+        "train_num_ngs",
+    ]
+    for param in int_parameters:
+        if param in config and not isinstance(config[param], int):
+            raise TypeError("Parameter {0} must be int".format(param))
+
+    float_parameters = [
+        "init_value",
+        "learning_rate",
+        "embed_l2",
+        "embed_l1",
+        "layer_l2",
+        "layer_l1",
+        "mu",
+    ]
+    for param in float_parameters:
+        if param in config and not isinstance(config[param], float):
+            raise TypeError("Parameter {0} must be float".format(param))
+
+    str_parameters = [
+        "train_file",
+        "eval_file",
+        "test_file",
+        "infer_file",
+        "method",
+        "load_model_name",
+        "infer_model_name",
+        "loss",
+        "optimizer",
+        "init_method",
+        "attention_activation",
+        "user_vocab",
+        "item_vocab",
+        "cate_vocab",
+    ]
+    for param in str_parameters:
+        if param in config and not isinstance(config[param], str):
+            raise TypeError("Parameter {0} must be str".format(param))
+
+    list_parameters = [
+        "layer_sizes",
+        "activation",
+        "dropout",
+        "att_fcn_layer_sizes",
+        "dilations",
+    ]
+    for param in list_parameters:
+        if param in config and not isinstance(config[param], list):
+            raise TypeError("Parameter {0} must be list".format(param))
+ + +
[docs]def check_nn_config(f_config):
+    """Check neural networks configuration.
+
+    Args:
+        f_config (dict): Neural network configuration.
+
+    Raises:
+        ValueError: If the parameters are not correct.
+    """
+    if f_config["model_type"] in ["fm", "FM"]:
+        required_parameters = ["FEATURE_COUNT", "dim", "loss", "data_format", "method"]
+    elif f_config["model_type"] in ["lr", "LR"]:
+        required_parameters = ["FEATURE_COUNT", "loss", "data_format", "method"]
+    elif f_config["model_type"] in ["dkn", "DKN"]:
+        required_parameters = [
+            "doc_size",
+            "history_size",
+            "wordEmb_file",
+            "entityEmb_file",
+            "contextEmb_file",
+            "news_feature_file",
+            "user_history_file",
+            "word_size",
+            "entity_size",
+            "use_entity",
+            "use_context",
+            "data_format",
+            "dim",
+            "layer_sizes",
+            "activation",
+            "attention_activation",
+            "attention_dropout",
+            "loss",
+            "dropout",
+            "method",
+            "num_filters",
+            "filter_sizes",
+        ]
+    elif f_config["model_type"] in ["exDeepFM", "xDeepFM"]:
+        required_parameters = [
+            "FIELD_COUNT",
+            "FEATURE_COUNT",
+            "method",
+            "dim",
+            "layer_sizes",
+            "cross_layer_sizes",
+            "activation",
+            "loss",
+            "data_format",
+            "dropout",
+        ]
+    elif f_config["model_type"] in ["gru", "GRU"]:
+        required_parameters = [
+            "item_embedding_dim",
+            "cate_embedding_dim",
+            "max_seq_length",
+            "loss",
+            "method",
+            "user_vocab",
+            "item_vocab",
+            "cate_vocab",
+            "hidden_size",
+        ]
+    elif f_config["model_type"] in ["caser", "CASER", "Caser"]:
+        required_parameters = [
+            "item_embedding_dim",
+            "cate_embedding_dim",
+            "user_embedding_dim",
+            "max_seq_length",
+            "loss",
+            "method",
+            "user_vocab",
+            "item_vocab",
+            "cate_vocab",
+            "T",
+            "L",
+            "n_v",
+            "n_h",
+            "min_seq_length",
+        ]
+    elif f_config["model_type"] in ["asvd", "ASVD", "a2svd", "A2SVD"]:
+        required_parameters = [
+            "item_embedding_dim",
+            "cate_embedding_dim",
+            "max_seq_length",
+            "loss",
+            "method",
+            "user_vocab",
+            "item_vocab",
+            "cate_vocab",
+        ]
+    elif f_config["model_type"] in ["slirec", "sli_rec", "SLI_REC", "Sli_rec"]:
+        required_parameters = [
+            "item_embedding_dim",
+            "cate_embedding_dim",
+            "max_seq_length",
+            "loss",
+            "method",
+            "user_vocab",
+            "item_vocab",
+            "cate_vocab",
+            "attention_size",
+            "hidden_size",
+            "att_fcn_layer_sizes",
+        ]
+    elif f_config["model_type"] in [
+        "nextitnet",
+        "next_it_net",
+        "NextItNet",
+        "NEXT_IT_NET",
+    ]:
+        required_parameters = [
+            "item_embedding_dim",
+            "cate_embedding_dim",
+            "user_embedding_dim",
+            "max_seq_length",
+            "loss",
+            "method",
+            "user_vocab",
+            "item_vocab",
+            "cate_vocab",
+            "dilations",
+            "kernel_size",
+            "min_seq_length",
+        ]
+    else:
+        required_parameters = []
+
+    # check required parameters
+    for param in required_parameters:
+        if param not in f_config:
+            raise ValueError("Parameter {0} must be set".format(param))
+
+    if f_config["model_type"] in ["exDeepFM", "xDeepFM"]:
+        if f_config["data_format"] != "ffm":
+            raise ValueError(
+                "For xDeepFM model, data format must be 'ffm', but yours is set to {0}".format(
+                    f_config["data_format"]
+                )
+            )
+    elif f_config["model_type"] in ["dkn", "DKN"]:
+        if f_config["data_format"] != "dkn":
+            raise ValueError(
+                "For dkn model, data format must be 'dkn', but yours is set to {0}".format(
+                    f_config["data_format"]
+                )
+            )
+    check_type(f_config)
+ + +
[docs]def load_yaml(filename): + """Load a yaml file. + + Args: + filename (str): Filename. + + Returns: + dict: Dictionary. + """ + try: + with open(filename, "r") as f: + config = yaml.load(f, yaml.SafeLoader) + return config + except FileNotFoundError: # for file not found + raise + except Exception: # for other exceptions + raise IOError("load {0} error!".format(filename))
+ + +
[docs]class HParams: + """Class for holding hyperparameters for DeepRec algorithms.""" + + def __init__(self, hparams_dict): + """Create an HParams object from a dictionary of hyperparameter values. + + Args: + hparams_dict (dict): Dictionary with the model hyperparameters. + """ + for val in hparams_dict.values(): + if not ( + isinstance(val, int) + or isinstance(val, float) + or isinstance(val, str) + or isinstance(val, list) + ): + raise ValueError( + "Hyperparameter value {} should be integer, float, string or list.".format( + val + ) + ) + self._values = hparams_dict + for hparam in hparams_dict: + setattr(self, hparam, hparams_dict[hparam]) + + def __repr__(self): + return "HParams object with values {}".format(self._values.__repr__()) + +
[docs] def values(self): + """Return the hyperparameter values as a dictionary. + + Returns: + dict: Dictionary with the hyperparameter values. + """ + return self._values
+ + +
[docs]def create_hparams(flags): + """Create the model hyperparameters. + + Args: + flags (dict): Dictionary with the model requirements. + + Returns: + HParams: Hyperparameter object. + """ + init_dict = { + # dkn + "use_entity": True, + "use_context": True, + # model + "cross_activation": "identity", + "user_dropout": False, + "dropout": [0.0], + "attention_dropout": 0.0, + "load_saved_model": False, + "fast_CIN_d": 0, + "use_Linear_part": False, + "use_FM_part": False, + "use_CIN_part": False, + "use_DNN_part": False, + # train + "init_method": "tnormal", + "init_value": 0.01, + "embed_l2": 0.0, + "embed_l1": 0.0, + "layer_l2": 0.0, + "layer_l1": 0.0, + "cross_l2": 0.0, + "cross_l1": 0.0, + "reg_kg": 0.0, + "learning_rate": 0.001, + "lr_rs": 1, + "lr_kg": 0.5, + "kg_training_interval": 5, + "max_grad_norm": 2, + "is_clip_norm": 0, + "dtype": 32, + "optimizer": "adam", + "epochs": 10, + "batch_size": 1, + "enable_BN": False, + # show info + "show_step": 1, + "save_model": True, + "save_epoch": 5, + "write_tfevents": False, + # sequential + "train_num_ngs": 4, + "need_sample": True, + "embedding_dropout": 0.0, + "EARLY_STOP": 100, + # caser, + "min_seq_length": 1, + # sum + "slots": 5, + "cell": "SUM", + } + init_dict.update(flags) + return HParams(init_dict)
+ + +
[docs]def prepare_hparams(yaml_file=None, **kwargs): + """Prepare the model hyperparameters and check that all have the correct value. + + Args: + yaml_file (str): YAML file as configuration. + + Returns: + HParams: Hyperparameter object. + """ + if yaml_file is not None: + config = load_yaml(yaml_file) + config = flat_config(config) + else: + config = {} + + if kwargs: + for name, value in kwargs.items(): + config[name] = value + + check_nn_config(config) + return create_hparams(config)
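
A typical call combines a YAML file with keyword overrides. This is only a sketch; the file name and override values are hypothetical, and the YAML is assumed to contain a valid `model_type` so that `check_nn_config` passes:

    hparams = prepare_hparams(
        yaml_file="xDeepFM.yaml",  # hypothetical path to a model config
        learning_rate=0.01,        # kwargs take precedence over values in the YAML
        batch_size=128,
    )
    print(hparams.learning_rate)   # every hyperparameter is exposed as an attribute

The kwargs are applied after the YAML is flattened, which is why overrides win; the merged dictionary is then validated by `check_nn_config` before the `HParams` object is built.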
+ + +
[docs]def download_deeprec_resources(azure_container_url, data_path, remote_resource_name):
+    """Download resources.
+
+    Args:
+        azure_container_url (str): URL of Azure container.
+        data_path (str): Path to download the resources.
+        remote_resource_name (str): Name of the resource.
+    """
+    os.makedirs(data_path, exist_ok=True)
+    remote_path = azure_container_url + remote_resource_name
+    maybe_download(remote_path, remote_resource_name, data_path)
+    with zipfile.ZipFile(os.path.join(data_path, remote_resource_name), "r") as zip_ref:
+        zip_ref.extractall(data_path)
+    os.remove(os.path.join(data_path, remote_resource_name))
+ + +
[docs]def mrr_score(y_true, y_score):
+    """Compute the MRR score metric.
+
+    Args:
+        y_true (np.ndarray): Ground-truth labels.
+        y_score (np.ndarray): Predicted scores.
+
+    Returns:
+        float: MRR score.
+    """
+    order = np.argsort(y_score)[::-1]
+    y_true = np.take(y_true, order)
+    rr_score = y_true / (np.arange(len(y_true)) + 1)
+    return np.sum(rr_score) / np.sum(y_true)
+ + +
[docs]def ndcg_score(y_true, y_score, k=10):
+    """Compute the NDCG score metric at k.
+
+    Args:
+        y_true (np.ndarray): Ground-truth labels.
+        y_score (np.ndarray): Predicted scores.
+        k (int): Cutoff rank.
+
+    Returns:
+        float: NDCG score at k.
+    """
+    best = dcg_score(y_true, y_true, k)
+    actual = dcg_score(y_true, y_score, k)
+    return actual / best
+ + +
[docs]def hit_score(y_true, y_score, k=10):
+    """Compute the hit score metric at k.
+
+    Args:
+        y_true (np.ndarray): Ground-truth labels.
+        y_score (np.ndarray): Predicted scores.
+        k (int): Cutoff rank.
+
+    Returns:
+        int: 1 if any ground-truth item appears in the top k predictions, 0 otherwise.
+    """
+    ground_truth = np.where(y_true == 1)[0]
+    argsort = np.argsort(y_score)[::-1][:k]
+    for idx in argsort:
+        if idx in ground_truth:
+            return 1
+    return 0
+ + +
[docs]def dcg_score(y_true, y_score, k=10):
+    """Compute the DCG score metric at k.
+
+    Args:
+        y_true (np.ndarray): Ground-truth labels.
+        y_score (np.ndarray): Predicted scores.
+        k (int): Cutoff rank.
+
+    Returns:
+        float: DCG score at k.
+    """
+    k = min(np.shape(y_true)[-1], k)
+    order = np.argsort(y_score)[::-1]
+    y_true = np.take(y_true, order[:k])
+    gains = 2**y_true - 1
+    discounts = np.log2(np.arange(len(y_true)) + 2)
+    return np.sum(gains / discounts)
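
A small worked example ties the four ranking helpers above together (the label and score arrays are invented):

    import numpy as np

    y_true = np.array([1, 0, 0, 1])           # items 0 and 3 are relevant
    y_score = np.array([0.9, 0.8, 0.7, 0.6])  # model ranks them 0, 1, 2, 3

    mrr_score(y_true, y_score)        # (1/1 + 1/4) / 2 = 0.625
    hit_score(y_true, y_score, k=2)   # 1, since a relevant item appears in the top 2
    dcg_score(y_true, y_score, k=4)   # 1/log2(2) + 1/log2(5) ≈ 1.4307
    ndcg_score(y_true, y_score, k=4)  # 1.4307 / 1.6309 (the ideal DCG) ≈ 0.8772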
+ + +
[docs]def cal_metric(labels, preds, metrics):
+    """Calculate metrics.
+
+    Available options are: `auc`, `rmse`, `logloss`, `acc` (accuracy), `f1`, `mean_mrr`,
+    `ndcg` (format like: ndcg@2;4;6;8), `hit` (format like: hit@2;4;6;8), `group_auc`.
+
+    Args:
+        labels (array-like): Labels.
+        preds (array-like): Predictions.
+        metrics (list): List of metric names.
+
+    Return:
+        dict: Metrics.
+
+    Examples:
+        >>> cal_metric(labels, preds, ["ndcg@2;4;6", "group_auc"])
+        {'ndcg@2': 0.4026, 'ndcg@4': 0.4953, 'ndcg@6': 0.5346, 'group_auc': 0.8096}
+
+    """
+    res = {}
+    for metric in metrics:
+        if metric == "auc":
+            auc = roc_auc_score(np.asarray(labels), np.asarray(preds))
+            res["auc"] = round(auc, 4)
+        elif metric == "rmse":
+            rmse = mean_squared_error(np.asarray(labels), np.asarray(preds))
+            res["rmse"] = round(np.sqrt(rmse), 4)
+        elif metric == "logloss":
+            # clip predictions to avoid a logloss of nan
+            preds = [max(min(p, 1.0 - 10e-12), 10e-12) for p in preds]
+            logloss = log_loss(np.asarray(labels), np.asarray(preds))
+            res["logloss"] = round(logloss, 4)
+        elif metric == "acc":
+            pred = np.asarray(preds)
+            pred[pred >= 0.5] = 1
+            pred[pred < 0.5] = 0
+            acc = accuracy_score(np.asarray(labels), pred)
+            res["acc"] = round(acc, 4)
+        elif metric == "f1":
+            pred = np.asarray(preds)
+            pred[pred >= 0.5] = 1
+            pred[pred < 0.5] = 0
+            f1 = f1_score(np.asarray(labels), pred)
+            res["f1"] = round(f1, 4)
+        elif metric == "mean_mrr":
+            mean_mrr = np.mean(
+                [
+                    mrr_score(each_labels, each_preds)
+                    for each_labels, each_preds in zip(labels, preds)
+                ]
+            )
+            res["mean_mrr"] = round(mean_mrr, 4)
+        elif metric.startswith("ndcg"):  # format like: ndcg@2;4;6;8
+            ndcg_list = [1, 2]
+            ks = metric.split("@")
+            if len(ks) > 1:
+                ndcg_list = [int(token) for token in ks[1].split(";")]
+            for k in ndcg_list:
+                ndcg_temp = np.mean(
+                    [
+                        ndcg_score(each_labels, each_preds, k)
+                        for each_labels, each_preds in zip(labels, preds)
+                    ]
+                )
+                res["ndcg@{0}".format(k)] = round(ndcg_temp, 4)
+        elif metric.startswith("hit"):  # format like: hit@2;4;6;8
+            hit_list = [1, 2]
+            ks = metric.split("@")
+            if len(ks) > 1:
+                hit_list = [int(token) for token in ks[1].split(";")]
+            for k in hit_list:
+                hit_temp = np.mean(
+                    [
+                        hit_score(each_labels, each_preds, k)
+                        for each_labels, each_preds in zip(labels, preds)
+                    ]
+                )
+                res["hit@{0}".format(k)] = round(hit_temp, 4)
+        elif metric == "group_auc":
+            group_auc = np.mean(
+                [
+                    roc_auc_score(each_labels, each_preds)
+                    for each_labels, each_preds in zip(labels, preds)
+                ]
+            )
+            res["group_auc"] = round(group_auc, 4)
+        else:
+            raise ValueError("Metric {0} not defined".format(metric))
+    return res
+ + +
[docs]def load_dict(filename): + """Load the vocabularies. + + Args: + filename (str): Filename of user, item or category vocabulary. + + Returns: + dict: A saved vocabulary. + """ + with open(filename, "rb") as f: + f_pkl = pkl.load(f) + return f_pkl
+

Source code for recommenders.models.deeprec.io.dkn_item2item_iterator

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+
+import tensorflow as tf
+from recommenders.models.deeprec.io.dkn_iterator import DKNTextIterator
+
+
+
[docs]class DKNItem2itemTextIterator(DKNTextIterator):
+    def __init__(self, hparams, graph):
+        """This iterator is for DKN's item-to-item recommendation version.
+        The tutorial can be found `in this notebook <https://github.com/microsoft/recommenders/blob/main/examples/07_tutorials/KDD2020-tutorial/step4_run_dkn_item2item.ipynb>`_.
+
+        Compared with user-to-item recommendations, the user behavior module is not needed here,
+        so the placeholders can be simplified from the original DKNTextIterator.
+
+        Args:
+            hparams (object): Global hyper-parameters.
+            graph (object): The running graph.
+        """
+        self.hparams = hparams
+        self.graph = graph
+        self.neg_num = hparams.neg_num
+        self.batch_size = hparams.batch_size * (self.neg_num + 2)
+        self.doc_size = hparams.doc_size
+        with self.graph.as_default():
+            self.candidate_news_index_batch = tf.compat.v1.placeholder(
+                tf.int64, [self.batch_size, self.doc_size], name="candidate_news_index"
+            )
+            self.candidate_news_entity_index_batch = tf.compat.v1.placeholder(
+                tf.int64,
+                [self.batch_size, self.doc_size],
+                name="candidate_news_entity_index",
+            )
+
+        self._load_necessary_files()
+
+    def _load_necessary_files(self):
+        """Only one feature file is needed: `news_feature_file`.
+        This function loads the news articles' features into two dictionaries: `self.news_word_index` and `self.news_entity_index`.
+        """
+        hparams = self.hparams
+        self.news_word_index = {}
+        self.news_entity_index = {}
+        with open(hparams.news_feature_file, "r") as rd:
+            while True:
+                line = rd.readline()
+                if not line:
+                    break
+                newsid, word_index, entity_index = line.strip().split(" ")
+                self.news_word_index[newsid] = [
+                    int(item) for item in word_index.split(",")
+                ]
+                self.news_entity_index[newsid] = [
+                    int(item) for item in entity_index.split(",")
+                ]
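
The `news_feature_file` consumed above is expected to contain one space-separated line per article: an ID, the comma-separated word indices, and the comma-separated entity indices. A made-up line for illustration:

    # N1234 17,42,98,0,0 3,3,0,0,0
    #   -> news_word_index["N1234"]   == [17, 42, 98, 0, 0]
    #   -> news_entity_index["N1234"] == [3, 3, 0, 0, 0]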
[docs] def load_data_from_file(self, infile): + """This function will return a mini-batch of data with features, + by looking up `news_word_index` dictionary and `news_entity_index` dictionary according to the news article's ID. + + Args: + infile (str): File path. Each line of `infile` is a news article's ID. + + Yields: + dict, list, int: + - A dictionary that maps graph elements to numpy arrays. + - A list with news article's ID. + - Size of the data in a batch. + """ + newsid_list = [] + candidate_news_index_batch = [] + candidate_news_entity_index_batch = [] + cnt = 0 + with open(infile, "r") as rd: + while True: + line = rd.readline() + if not line: + break + newsid = line.strip() + word_index, entity_index = ( + self.news_word_index[newsid], + self.news_entity_index[newsid], + ) + newsid_list.append(newsid) + + candidate_news_index_batch.append(word_index) + candidate_news_entity_index_batch.append(entity_index) + + cnt += 1 + if cnt >= self.batch_size: + res = self._convert_infer_data( + candidate_news_index_batch, + candidate_news_entity_index_batch, + ) + data_size = self.batch_size + yield self.gen_infer_feed_dict(res), newsid_list, data_size + candidate_news_index_batch = [] + candidate_news_entity_index_batch = [] + newsid_list = [] + cnt = 0 + + if cnt > 0: + data_size = cnt + while cnt < self.batch_size: + candidate_news_index_batch.append( + candidate_news_index_batch[cnt % data_size] + ) + candidate_news_entity_index_batch.append( + candidate_news_entity_index_batch[cnt % data_size] + ) + cnt += 1 + res = self._convert_infer_data( + candidate_news_index_batch, + candidate_news_entity_index_batch, + ) + yield self.gen_infer_feed_dict(res), newsid_list, data_size
\ No newline at end of file
diff --git a/_modules/recommenders/models/deeprec/io/dkn_iterator.html b/_modules/recommenders/models/deeprec/io/dkn_iterator.html
new file mode 100644
index 0000000000..11e86b7194
--- /dev/null
+++ b/_modules/recommenders/models/deeprec/io/dkn_iterator.html
@@ -0,0 +1,762 @@

Source code for recommenders.models.deeprec.io.dkn_iterator

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import tensorflow as tf
+import numpy as np
+
+from recommenders.models.deeprec.io.iterator import BaseIterator
+
+
+__all__ = ["DKNTextIterator"]
+
+
+
[docs]class DKNTextIterator(BaseIterator):
+    """Data loader for the DKN model.
+    DKN requires a special type of data format, where each instance contains a label, the candidate news article,
+    and the user's clicked news articles. Articles are represented by title words and title entities. Words and
+    entities are aligned.
+
+    Iterator will not load the whole data into memory. Instead, it loads data into memory
+    per mini-batch, so that large files can be used as input data.
+    """
+
+    def __init__(self, hparams, graph, col_spliter=" ", ID_spliter="%"):
+        """Initialize an iterator. Create necessary placeholders for the model.
+
+        Args:
+            hparams (object): Global hyper-parameters. Some key settings such as #_feature and #_field are there.
+            graph (object): The running graph. All created placeholders will be added to this graph.
+            col_spliter (str): Column splitter in one line.
+            ID_spliter (str): ID splitter in one line.
+        """
+        self.col_spliter = col_spliter
+        self.ID_spliter = ID_spliter
+        self.batch_size = hparams.batch_size
+        self.doc_size = hparams.doc_size
+        self.history_size = hparams.history_size
+
+        self.graph = graph
+        with self.graph.as_default():
+            self.labels = tf.compat.v1.placeholder(tf.float32, [None, 1], name="label")
+            self.candidate_news_index_batch = tf.compat.v1.placeholder(
+                tf.int64, [self.batch_size, self.doc_size], name="candidate_news_index"
+            )
+            self.click_news_index_batch = tf.compat.v1.placeholder(
+                tf.int64,
+                [self.batch_size, self.history_size, self.doc_size],
+                name="click_news_index",
+            )
+            self.candidate_news_entity_index_batch = tf.compat.v1.placeholder(
+                tf.int64,
+                [self.batch_size, self.doc_size],
+                name="candidate_news_entity_index",
+            )
+            self.click_news_entity_index_batch = tf.compat.v1.placeholder(
+                tf.int64,
+                [self.batch_size, self.history_size, self.doc_size],
+                name="click_news_entity_index",
+            )
+        self.news_word_index = {}
+        self.news_entity_index = {}
+        with tf.io.gfile.GFile(hparams.news_feature_file, "r") as rd:
+            for line in rd:
+                newsid, word_index, entity_index = line.strip().split(col_spliter)
+                self.news_word_index[newsid] = [
+                    int(item) for item in word_index.split(",")
+                ]
+                self.news_entity_index[newsid] = [
+                    int(item) for item in entity_index.split(",")
+                ]
+        self.user_history = {}
+        with tf.io.gfile.GFile(hparams.user_history_file, "r") as rd:
+            for line in rd:
+                if len(line.strip().split(col_spliter)) == 1:
+                    userid = line.strip()
+                    user_history = []
+                else:
+                    userid, user_history_string = line.strip().split(col_spliter)
+                    user_history = user_history_string.split(",")
+                click_news_index = []
+                click_news_entity_index = []
+                if len(user_history) > self.history_size:
+                    user_history = user_history[-self.history_size :]
+                for newsid in user_history:
+                    click_news_index.append(self.news_word_index[newsid])
+                    click_news_entity_index.append(self.news_entity_index[newsid])
+                for i in range(self.history_size - len(user_history)):
+                    click_news_index.append(np.zeros(self.doc_size))
+                    click_news_entity_index.append(np.zeros(self.doc_size))
+                self.user_history[userid] = (click_news_index, click_news_entity_index)
[docs] def parser_one_line(self, line): + """Parse one string line into feature values. + + Args: + line (str): a string indicating one instance + + Returns: + list: Parsed results including `label`, `candidate_news_index`, `click_news_index`, + `candidate_news_entity_index`, `click_news_entity_index`, `impression_id`. + + """ + impression_id = 0 + words = line.strip().split(self.ID_spliter) + if len(words) == 2: + impression_id = words[1].strip() + + cols = words[0].strip().split(self.col_spliter) + label = float(cols[0]) + + userid = cols[1] + candidate_news = cols[2] + + candidate_news_index = self.news_word_index[candidate_news] + candidate_news_entity_index = self.news_entity_index[candidate_news] + click_news_index = self.user_history[userid][0] + click_news_entity_index = self.user_history[userid][1] + + return ( + label, + candidate_news_index, + click_news_index, + candidate_news_entity_index, + click_news_entity_index, + impression_id, + )
+ +
[docs] def load_data_from_file(self, infile): + """Read and parse data from a file. + + Args: + infile (str): text input file. Each line in this file is an instance. + + Yields: + obj, list, int: + - An iterator that yields parsed results, in the format of graph `feed_dict`. + - Impression id list. + - Size of the data in a batch. + """ + candidate_news_index_batch = [] + click_news_index_batch = [] + candidate_news_entity_index_batch = [] + click_news_entity_index_batch = [] + label_list = [] + impression_id_list = [] + cnt = 0 + + with tf.io.gfile.GFile(infile, "r") as rd: + for line in rd: + ( + label, + candidate_news_index, + click_news_index, + candidate_news_entity_index, + click_news_entity_index, + impression_id, + ) = self.parser_one_line(line) + + candidate_news_index_batch.append(candidate_news_index) + click_news_index_batch.append(click_news_index) + candidate_news_entity_index_batch.append(candidate_news_entity_index) + click_news_entity_index_batch.append(click_news_entity_index) + label_list.append(label) + impression_id_list.append(impression_id) + + cnt += 1 + if cnt >= self.batch_size: + res = self._convert_data( + label_list, + candidate_news_index_batch, + click_news_index_batch, + candidate_news_entity_index_batch, + click_news_entity_index_batch, + impression_id_list, + ) + data_size = self.batch_size + yield self.gen_feed_dict(res), impression_id_list, data_size + candidate_news_index_batch = [] + click_news_index_batch = [] + candidate_news_entity_index_batch = [] + click_news_entity_index_batch = [] + label_list = [] + impression_id_list = [] + cnt = 0 + if cnt > 0: + data_size = cnt + while cnt < self.batch_size: + candidate_news_index_batch.append( + candidate_news_index_batch[cnt % data_size] + ) + click_news_index_batch.append( + click_news_index_batch[cnt % data_size] + ) + candidate_news_entity_index_batch.append( + candidate_news_entity_index_batch[cnt % data_size] + ) + click_news_entity_index_batch.append( + click_news_entity_index_batch[cnt % data_size] + ) + label_list.append(label_list[cnt % data_size]) + impression_id_list.append(impression_id_list[cnt % data_size]) + cnt += 1 + res = self._convert_data( + label_list, + candidate_news_index_batch, + click_news_index_batch, + candidate_news_entity_index_batch, + click_news_entity_index_batch, + impression_id_list, + ) + yield self.gen_feed_dict(res), impression_id_list, data_size
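
The tail of `load_data_from_file` pads a final partial batch up to `batch_size` by cycling over the rows already collected (the `cnt % data_size` indexing), while yielding the true `data_size` so callers can discard the padded rows. The same trick in isolation, with toy values:

    batch = ["a", "b", "c"]  # a partial final batch, so data_size = 3
    batch_size = 8
    cnt, data_size = len(batch), len(batch)
    while cnt < batch_size:
        batch.append(batch[cnt % data_size])  # repeat a, b, c, a, b, ...
        cnt += 1
    # batch == ["a", "b", "c", "a", "b", "c", "a", "b"]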
+ +
[docs]    def load_infer_data_from_file(self, infile):
+        """Read and parse data from a file for inferring document embeddings.
+
+        Args:
+            infile (str): text input file. Each line in this file is an instance.
+
+        Yields:
+            obj, list, int:
+            - An iterator that yields parsed results, in the format of graph `feed_dict`.
+            - News article ID list.
+            - Size of the data in a batch.
+        """
+        newsid_list = []
+        candidate_news_index_batch = []
+        candidate_news_entity_index_batch = []
+        cnt = 0
+        with tf.io.gfile.GFile(infile, "r") as rd:
+            for line in rd:
+                newsid, word_index, entity_index = line.strip().split(" ")
+                newsid_list.append(newsid)
+                candidate_news_index = []
+                candidate_news_entity_index = []
+                for item in word_index.split(","):
+                    candidate_news_index.append(int(item))
+                for item in entity_index.split(","):
+                    candidate_news_entity_index.append(int(item))
+
+                candidate_news_index_batch.append(candidate_news_index)
+                candidate_news_entity_index_batch.append(candidate_news_entity_index)
+
+                cnt += 1
+                if cnt >= self.batch_size:
+                    res = self._convert_infer_data(
+                        candidate_news_index_batch, candidate_news_entity_index_batch
+                    )
+                    data_size = self.batch_size
+                    yield self.gen_infer_feed_dict(res), newsid_list, data_size
+                    candidate_news_index_batch = []
+                    candidate_news_entity_index_batch = []
+                    newsid_list = []
+                    cnt = 0
+
+            if cnt > 0:
+                data_size = cnt
+                while cnt < self.batch_size:
+                    candidate_news_index_batch.append(
+                        candidate_news_index_batch[cnt % data_size]
+                    )
+                    candidate_news_entity_index_batch.append(
+                        candidate_news_entity_index_batch[cnt % data_size]
+                    )
+                    cnt += 1
+                res = self._convert_infer_data(
+                    candidate_news_index_batch, candidate_news_entity_index_batch
+                )
+                yield self.gen_infer_feed_dict(res), newsid_list, data_size
+ + def _convert_data( + self, + label_list, + candidate_news_index_batch, + click_news_index_batch, + candidate_news_entity_index_batch, + click_news_entity_index_batch, + impression_id_list, + ): + """Convert data into numpy arrays that are good for further model operation. + + Args: + label_list (list): a list of ground-truth labels. + candidate_news_index_batch (list): the candidate news article's words indices + click_news_index_batch (list): words indices for user's clicked news articles + candidate_news_entity_index_batch (list): the candidate news article's entities indices + click_news_entity_index_batch (list): the user's clicked news article's entities indices + impression_id_list (list) : the session's impression indices + + Returns: + dict: A dictionary, containing multiple numpy arrays that are convenient for further operation. + """ + res = {} + res["labels"] = np.asarray([[label] for label in label_list], dtype=np.float32) + res["candidate_news_index_batch"] = np.asarray( + candidate_news_index_batch, dtype=np.int64 + ) + res["click_news_index_batch"] = np.asarray( + click_news_index_batch, dtype=np.int64 + ) + res["candidate_news_entity_index_batch"] = np.asarray( + candidate_news_entity_index_batch, dtype=np.int64 + ) + res["click_news_entity_index_batch"] = np.asarray( + click_news_entity_index_batch, dtype=np.int64 + ) + res["impression_id"] = np.asarray(impression_id_list, dtype=np.int64) + return res + + def _convert_infer_data( + self, candidate_news_index_batch, candidate_news_entity_index_batch + ): + """Convert data into numpy arrays that are good for further model operation. + + Args: + candidate_news_index_batch (list): the candidate news article's words indices + candidate_news_entity_index_batch (list): the candidate news article's entities indices + Returns: + dict: A dictionary, containing multiple numpy arrays that are convenient for further operation. + """ + res = {} + res["candidate_news_index_batch"] = np.asarray( + candidate_news_index_batch, dtype=np.int64 + ) + res["candidate_news_entity_index_batch"] = np.asarray( + candidate_news_entity_index_batch, dtype=np.int64 + ) + return res + +
[docs] def gen_feed_dict(self, data_dict): + """Construct a dictionary that maps graph elements to values. + + Args: + data_dict (dict): a dictionary that maps string name to numpy arrays. + + Returns: + dict: A dictionary that maps graph elements to numpy arrays. + + """ + feed_dict = { + self.labels: data_dict["labels"].reshape([-1, 1]), + self.candidate_news_index_batch: data_dict[ + "candidate_news_index_batch" + ].reshape([self.batch_size, self.doc_size]), + self.click_news_index_batch: data_dict["click_news_index_batch"].reshape( + [self.batch_size, self.history_size, self.doc_size] + ), + self.candidate_news_entity_index_batch: data_dict[ + "candidate_news_entity_index_batch" + ].reshape([-1, self.doc_size]), + self.click_news_entity_index_batch: data_dict[ + "click_news_entity_index_batch" + ].reshape([-1, self.history_size, self.doc_size]), + } + return feed_dict
+ +
[docs] def gen_infer_feed_dict(self, data_dict): + """Construct a dictionary that maps graph elements to values. + + Args: + data_dict (dict): a dictionary that maps string name to numpy arrays. + + Returns: + dict: A dictionary that maps graph elements to numpy arrays. + + """ + feed_dict = { + self.candidate_news_index_batch: data_dict[ + "candidate_news_index_batch" + ].reshape([self.batch_size, self.doc_size]), + self.candidate_news_entity_index_batch: data_dict[ + "candidate_news_entity_index_batch" + ].reshape([-1, self.doc_size]), + } + return feed_dict
\ No newline at end of file
diff --git a/_modules/recommenders/models/deeprec/io/iterator.html b/_modules/recommenders/models/deeprec/io/iterator.html
new file mode 100644
index 0000000000..d94170b986
--- /dev/null
+++ b/_modules/recommenders/models/deeprec/io/iterator.html
@@ -0,0 +1,630 @@

Source code for recommenders.models.deeprec.io.iterator

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import numpy as np
+import tensorflow as tf
+import abc
+
+
+
[docs]class BaseIterator(object): + """Abstract base iterator class""" + +
[docs] @abc.abstractmethod + def parser_one_line(self, line): + """Abstract method. Parse one string line into feature values. + + Args: + line (str): A string indicating one instance. + """ + pass
+ +
[docs] @abc.abstractmethod + def load_data_from_file(self, infile): + """Abstract method. Read and parse data from a file. + + Args: + infile (str): Text input file. Each line in this file is an instance. + """ + pass
+ + @abc.abstractmethod + def _convert_data(self, labels, features): + pass + +
[docs] @abc.abstractmethod + def gen_feed_dict(self, data_dict): + """Abstract method. Construct a dictionary that maps graph elements to values. + + Args: + data_dict (dict): A dictionary that maps string name to numpy arrays. + """ + pass
+ + +
[docs]class FFMTextIterator(BaseIterator): + """Data loader for FFM format based models, such as xDeepFM. + Iterator will not load the whole data into memory. Instead, it loads data into memory + per mini-batch, so that large files can be used as input data. + """ + + def __init__(self, hparams, graph, col_spliter=" ", ID_spliter="%"): + """Initialize an iterator. Create the necessary placeholders for the model. + + Args: + hparams (object): Global hyper-parameters. Some key settings such as #_feature and #_field are there. + graph (object): The running graph. All created placeholder will be added to this graph. + col_spliter (str): column splitter in one line. + ID_spliter (str): ID splitter in one line. + """ + self.feature_cnt = hparams.FEATURE_COUNT + self.field_cnt = hparams.FIELD_COUNT + self.col_spliter = col_spliter + self.ID_spliter = ID_spliter + self.batch_size = hparams.batch_size + + self.graph = graph + with self.graph.as_default(): + self.labels = tf.compat.v1.placeholder(tf.float32, [None, 1], name="label") + self.fm_feat_indices = tf.compat.v1.placeholder( + tf.int64, [None, 2], name="fm_feat_indices" + ) + self.fm_feat_values = tf.compat.v1.placeholder( + tf.float32, [None], name="fm_feat_values" + ) + self.fm_feat_shape = tf.compat.v1.placeholder( + tf.int64, [None], name="fm_feat_shape" + ) + self.dnn_feat_indices = tf.compat.v1.placeholder( + tf.int64, [None, 2], name="dnn_feat_indices" + ) + self.dnn_feat_values = tf.compat.v1.placeholder( + tf.int64, [None], name="dnn_feat_values" + ) + self.dnn_feat_weights = tf.compat.v1.placeholder( + tf.float32, [None], name="dnn_feat_weights" + ) + self.dnn_feat_shape = tf.compat.v1.placeholder( + tf.int64, [None], name="dnn_feat_shape" + ) + +
[docs] def parser_one_line(self, line): + """Parse one string line into feature values. + + Args: + line (str): A string indicating one instance. + + Returns: + list: Parsed results, including `label`, `features` and `impression_id`. + + """ + impression_id = 0 + words = line.strip().split(self.ID_spliter) + if len(words) == 2: + impression_id = words[1].strip() + + cols = words[0].strip().split(self.col_spliter) + + label = float(cols[0]) + + features = [] + for word in cols[1:]: + if not word.strip(): + continue + tokens = word.split(":") + features.append([int(tokens[0]) - 1, int(tokens[1]) - 1, float(tokens[2])]) + + return label, features, impression_id
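
The input follows the libffm convention of `label field:feature:value` tokens, with the parser shifting the 1-based field and feature indices down to 0-based. Assuming `it` is an already-constructed FFMTextIterator, a made-up line parses as:

    label, features, impression_id = it.parser_one_line("1 1:3:1.0 2:10:0.5 3:42:1.0")
    # label == 1.0
    # features == [[0, 2, 1.0], [1, 9, 0.5], [2, 41, 1.0]]  # field/feature indices shifted by -1
    # impression_id == 0                                    # no "%"-separated ID in this line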
+ +
[docs] def load_data_from_file(self, infile): + """Read and parse data from a file. + + Args: + infile (str): Text input file. Each line in this file is an instance. + + Returns: + object: An iterator that yields parsed results, in the format of graph `feed_dict`. + """ + label_list = [] + features_list = [] + impression_id_list = [] + cnt = 0 + + with tf.io.gfile.GFile(infile, "r") as rd: + for line in rd: + label, features, impression_id = self.parser_one_line(line) + + features_list.append(features) + label_list.append(label) + impression_id_list.append(impression_id) + + cnt += 1 + if cnt == self.batch_size: + res = self._convert_data(label_list, features_list) + yield self.gen_feed_dict(res), impression_id_list, self.batch_size + label_list = [] + features_list = [] + impression_id_list = [] + cnt = 0 + if cnt > 0: + res = self._convert_data(label_list, features_list) + yield self.gen_feed_dict(res), impression_id_list, cnt
+ + def _convert_data(self, labels, features): + """Convert data into numpy arrays that are good for further operation. + + Args: + labels (list): a list of ground-truth labels. + features (list): a 3-dimensional list, carrying a list (batch_size) of feature array, + where each feature array is a list of `[field_idx, feature_idx, feature_value]` tuple. + + Returns: + dict: A dictionary, containing multiple numpy arrays that are convenient for further operation. + """ + dim = self.feature_cnt + FIELD_COUNT = self.field_cnt + instance_cnt = len(labels) + + fm_feat_indices = [] + fm_feat_values = [] + fm_feat_shape = [instance_cnt, dim] + + dnn_feat_indices = [] + dnn_feat_values = [] + dnn_feat_weights = [] + dnn_feat_shape = [instance_cnt * FIELD_COUNT, -1] + + for i in range(instance_cnt): + m = len(features[i]) + dnn_feat_dic = {} + for j in range(m): + fm_feat_indices.append([i, features[i][j][1]]) + fm_feat_values.append(features[i][j][2]) + if features[i][j][0] not in dnn_feat_dic: + dnn_feat_dic[features[i][j][0]] = 0 + else: + dnn_feat_dic[features[i][j][0]] += 1 + dnn_feat_indices.append( + [ + i * FIELD_COUNT + features[i][j][0], + dnn_feat_dic[features[i][j][0]], + ] + ) + dnn_feat_values.append(features[i][j][1]) + dnn_feat_weights.append(features[i][j][2]) + if dnn_feat_shape[1] < dnn_feat_dic[features[i][j][0]]: + dnn_feat_shape[1] = dnn_feat_dic[features[i][j][0]] + dnn_feat_shape[1] += 1 + + sorted_index = sorted( + range(len(dnn_feat_indices)), + key=lambda k: (dnn_feat_indices[k][0], dnn_feat_indices[k][1]), + ) + + res = {} + res["fm_feat_indices"] = np.asarray(fm_feat_indices, dtype=np.int64) + res["fm_feat_values"] = np.asarray(fm_feat_values, dtype=np.float32) + res["fm_feat_shape"] = np.asarray(fm_feat_shape, dtype=np.int64) + res["labels"] = np.asarray([[label] for label in labels], dtype=np.float32) + + res["dnn_feat_indices"] = np.asarray(dnn_feat_indices, dtype=np.int64)[ + sorted_index + ] + res["dnn_feat_values"] = np.asarray(dnn_feat_values, dtype=np.int64)[ + sorted_index + ] + res["dnn_feat_weights"] = np.asarray(dnn_feat_weights, dtype=np.float32)[ + sorted_index + ] + res["dnn_feat_shape"] = np.asarray(dnn_feat_shape, dtype=np.int64) + return res + +
[docs] def gen_feed_dict(self, data_dict): + """Construct a dictionary that maps graph elements to values. + + Args: + data_dict (dict): A dictionary that maps string name to numpy arrays. + + Returns: + dict: A dictionary that maps graph elements to numpy arrays. + + """ + feed_dict = { + self.labels: data_dict["labels"], + self.fm_feat_indices: data_dict["fm_feat_indices"], + self.fm_feat_values: data_dict["fm_feat_values"], + self.fm_feat_shape: data_dict["fm_feat_shape"], + self.dnn_feat_indices: data_dict["dnn_feat_indices"], + self.dnn_feat_values: data_dict["dnn_feat_values"], + self.dnn_feat_weights: data_dict["dnn_feat_weights"], + self.dnn_feat_shape: data_dict["dnn_feat_shape"], + } + return feed_dict
\ No newline at end of file
diff --git a/_modules/recommenders/models/deeprec/io/nextitnet_iterator.html b/_modules/recommenders/models/deeprec/io/nextitnet_iterator.html
new file mode 100644
index 0000000000..ff5e797012
--- /dev/null
+++ b/_modules/recommenders/models/deeprec/io/nextitnet_iterator.html
@@ -0,0 +1,657 @@

Source code for recommenders.models.deeprec.io.nextitnet_iterator

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import tensorflow as tf
+import numpy as np
+import random
+
+from recommenders.models.deeprec.io.sequential_iterator import SequentialIterator
+from recommenders.models.deeprec.deeprec_utils import load_dict
+
+
+__all__ = ["NextItNetIterator"]
+
+
+
[docs]class NextItNetIterator(SequentialIterator):
+    """Data loader for the NextItNet model.
+
+    NextItNet requires a special type of data format. In the training stage, each instance will
+    produce `(sequence_length * train_num_ngs)` target items and labels, so that NextItNet
+    outputs predictions for every position in a sequence, not only the last item.
+    """
+
+    def __init__(self, hparams, graph, col_spliter="\t"):
+        """Initialize an iterator. Create necessary placeholders for the model.
+        Note this differs from the sequential iterator.
+
+        Args:
+            hparams (object): Global hyper-parameters. Some key settings such as #_feature and #_field are there.
+            graph (object): The running graph. All created placeholders will be added to this graph.
+            col_spliter (str): Column splitter in one line.
+        """
+        self.col_spliter = col_spliter
+
+        self.userdict, self.itemdict, self.catedict = (
+            load_dict(hparams.user_vocab),
+            load_dict(hparams.item_vocab),
+            load_dict(hparams.cate_vocab),
+        )
+
+        self.max_seq_length = hparams.max_seq_length
+        self.batch_size = hparams.batch_size
+        self.iter_data = dict()
+
+        self.graph = graph
+        with self.graph.as_default():
+            self.labels = tf.compat.v1.placeholder(
+                tf.float32, [None, None], name="label"
+            )
+            self.users = tf.compat.v1.placeholder(tf.int32, [None], name="users")
+            self.items = tf.compat.v1.placeholder(tf.int32, [None, None], name="items")
+            self.cates = tf.compat.v1.placeholder(tf.int32, [None, None], name="cates")
+            self.item_history = tf.compat.v1.placeholder(
+                tf.int32, [None, self.max_seq_length], name="item_history"
+            )
+            self.item_cate_history = tf.compat.v1.placeholder(
+                tf.int32, [None, self.max_seq_length], name="item_cate_history"
+            )
+            self.mask = tf.compat.v1.placeholder(
+                tf.int32, [None, self.max_seq_length], name="mask"
+            )
+            self.time = tf.compat.v1.placeholder(tf.float32, [None], name="time")
+            self.time_diff = tf.compat.v1.placeholder(
+                tf.float32, [None, self.max_seq_length], name="time_diff"
+            )
+            self.time_from_first_action = tf.compat.v1.placeholder(
+                tf.float32, [None, self.max_seq_length], name="time_from_first_action"
+            )
+            self.time_to_now = tf.compat.v1.placeholder(
+                tf.float32, [None, self.max_seq_length], name="time_to_now"
+            )
+
+    def _convert_data(
+        self,
+        label_list,
+        user_list,
+        item_list,
+        item_cate_list,
+        item_history_batch,
+        item_cate_history_batch,
+        time_list,
+        time_diff_list,
+        time_from_first_action_list,
+        time_to_now_list,
+        batch_num_ngs,
+    ):
+        """Convert data into numpy arrays that are good for further model operation.
+        Note: This is different from `sequential_iterator`.
+
+        Args:
+            label_list (list): A list of ground-truth labels.
+            user_list (list): A list of user indexes.
+            item_list (list): A list of item indexes.
+            item_cate_list (list): A list of category indexes.
+            item_history_batch (list): A list of item history indexes.
+            item_cate_history_batch (list): A list of category history indexes.
+            time_list (list): A list of current timestamps.
+            time_diff_list (list): A list of timestamps between each pair of sequential operations.
+            time_from_first_action_list (list): A list of timestamps from the first operation.
+            time_to_now_list (list): A list of timestamps to the current time.
+            batch_num_ngs (int): The number of negative samples used while training in a mini-batch.
+
+        Returns:
+            dict: A dictionary, containing multiple numpy arrays that are convenient for further operation.
+ """ + if batch_num_ngs: + instance_cnt = len(label_list) + if instance_cnt < 5: + return + + label_list_all = [] + item_list_all = [] + item_cate_list_all = [] + user_list_all = np.asarray( + [[user] * (batch_num_ngs + 1) for user in user_list], dtype=np.int32 + ).flatten() + time_list_all = np.asarray( + [[t] * (batch_num_ngs + 1) for t in time_list], dtype=np.float32 + ).flatten() + + history_lengths = [len(item_history_batch[i]) for i in range(instance_cnt)] + max_seq_length_batch = self.max_seq_length + item_history_batch_all = np.zeros( + (instance_cnt * (batch_num_ngs + 1), max_seq_length_batch), + dtype=np.int32, + ) + item_cate_history_batch_all = np.zeros( + (instance_cnt * (batch_num_ngs + 1), max_seq_length_batch), + dtype=np.int32, + ) + time_diff_batch = np.zeros( + (instance_cnt * (batch_num_ngs + 1), max_seq_length_batch), + dtype=np.float32, + ) + time_from_first_action_batch = np.zeros( + (instance_cnt * (batch_num_ngs + 1), max_seq_length_batch), + dtype=np.float32, + ) + time_to_now_batch = np.zeros( + (instance_cnt * (batch_num_ngs + 1), max_seq_length_batch), + dtype=np.float32, + ) + mask = np.zeros( + (instance_cnt * (1 + batch_num_ngs), max_seq_length_batch), + dtype=np.float32, + ) + + for i in range(instance_cnt): + this_length = min(history_lengths[i], max_seq_length_batch) + for index in range(batch_num_ngs + 1): + item_history_batch_all[ + i * (batch_num_ngs + 1) + index, -this_length: + ] = np.asarray(item_history_batch[i][-this_length:], dtype=np.int32) + item_cate_history_batch_all[ + i * (batch_num_ngs + 1) + index, -this_length: + ] = np.asarray( + item_cate_history_batch[i][-this_length:], dtype=np.int32 + ) + mask[i * (batch_num_ngs + 1) + index, -this_length:] = 1.0 + time_diff_batch[ + i * (batch_num_ngs + 1) + index, -this_length: + ] = np.asarray(time_diff_list[i][-this_length:], dtype=np.float32) + time_from_first_action_batch[ + i * (batch_num_ngs + 1) + index, -this_length: + ] = np.asarray( + time_from_first_action_list[i][-this_length:], dtype=np.float32 + ) + time_to_now_batch[ + i * (batch_num_ngs + 1) + index, -this_length: + ] = np.asarray(time_to_now_list[i][-this_length:], dtype=np.float32) + + for i in range(instance_cnt): + positive_item = [ + *item_history_batch_all[i * (batch_num_ngs + 1)][1:], + item_list[i], + ] + positive_item_cate = [ + *item_cate_history_batch_all[i * (batch_num_ngs + 1)][1:], + item_cate_list[i], + ] + label_list_all.append([1] * max_seq_length_batch) + item_list_all.append(positive_item) + item_cate_list_all.append(positive_item_cate) + + count = 0 + while count < batch_num_ngs: + negative_item_list = [] + negative_item_cate_list = [] + count_inner = 1 + while count_inner <= max_seq_length_batch: + random_value = random.randint(0, instance_cnt - 1) + negative_item = item_list[random_value] + if negative_item == positive_item[count_inner - 1]: + continue + negative_item_list.append(negative_item) + negative_item_cate_list.append(item_cate_list[random_value]) + count_inner += 1 + + label_list_all.append([0] * max_seq_length_batch) + item_list_all.append(negative_item_list) + item_cate_list_all.append(negative_item_cate_list) + count += 1 + + res = {} + res["labels"] = np.asarray( + label_list_all, dtype=np.float32 + ) # .reshape(-1,1) + res["users"] = user_list_all + res["items"] = np.asarray(item_list_all, dtype=np.int32) + res["cates"] = np.asarray(item_cate_list_all, dtype=np.int32) + res["item_history"] = item_history_batch_all + res["item_cate_history"] = item_cate_history_batch_all + res["mask"] = mask + 
res["time"] = time_list_all + res["time_diff"] = time_diff_batch + res["time_from_first_action"] = time_from_first_action_batch + res["time_to_now"] = time_to_now_batch + + return res + + else: + instance_cnt = len(label_list) + history_lengths = [len(item_history_batch[i]) for i in range(instance_cnt)] + max_seq_length_batch = self.max_seq_length + item_history_batch_all = np.zeros( + (instance_cnt, max_seq_length_batch), dtype=np.int32 + ) + item_cate_history_batch_all = np.zeros( + (instance_cnt, max_seq_length_batch), dtype=np.int32 + ) + time_diff_batch = np.zeros( + (instance_cnt, max_seq_length_batch), dtype=np.float32 + ) + time_from_first_action_batch = np.zeros( + (instance_cnt, max_seq_length_batch), dtype=np.float32 + ) + time_to_now_batch = np.zeros( + (instance_cnt, max_seq_length_batch), dtype=np.float32 + ) + mask = np.zeros((instance_cnt, max_seq_length_batch), dtype=np.float32) + + for i in range(instance_cnt): + this_length = min(history_lengths[i], max_seq_length_batch) + item_history_batch_all[i, -this_length:] = item_history_batch[i][ + -this_length: + ] + item_cate_history_batch_all[i, -this_length:] = item_cate_history_batch[ + i + ][-this_length:] + mask[i, -this_length:] = 1.0 + time_diff_batch[i, -this_length:] = time_diff_list[i][-this_length:] + time_from_first_action_batch[ + i, -this_length: + ] = time_from_first_action_list[i][-this_length:] + time_to_now_batch[i, -this_length:] = time_to_now_list[i][-this_length:] + + res = {} + res["labels"] = np.asarray(label_list, dtype=np.float32).reshape([-1, 1]) + res["users"] = np.asarray(user_list, dtype=np.float32) + res["items"] = np.asarray(item_list, dtype=np.int32).reshape([-1, 1]) + res["cates"] = np.asarray(item_cate_list, dtype=np.int32).reshape([-1, 1]) + res["item_history"] = item_history_batch_all + res["item_cate_history"] = item_cate_history_batch_all + res["mask"] = mask + res["time"] = np.asarray(time_list, dtype=np.float32) + res["time_diff"] = time_diff_batch + res["time_from_first_action"] = time_from_first_action_batch + res["time_to_now"] = time_to_now_batch + return res
\ No newline at end of file
diff --git a/_modules/recommenders/models/deeprec/io/sequential_iterator.html b/_modules/recommenders/models/deeprec/io/sequential_iterator.html
new file mode 100644
index 0000000000..f10eca64b4
--- /dev/null
+++ b/_modules/recommenders/models/deeprec/io/sequential_iterator.html
@@ -0,0 +1,865 @@

Source code for recommenders.models.deeprec.io.sequential_iterator

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import tensorflow as tf
+import numpy as np
+import random
+
+from recommenders.models.deeprec.io.iterator import BaseIterator
+from recommenders.models.deeprec.deeprec_utils import load_dict
+
+
+__all__ = ["SequentialIterator"]
+
+
+
[docs]class SequentialIterator(BaseIterator):
+    def __init__(self, hparams, graph, col_spliter="\t"):
+        """Initialize an iterator. Create necessary placeholders for the model.
+
+        Args:
+            hparams (object): Global hyper-parameters. Some key settings such as #_feature and #_field are there.
+            graph (object): The running graph. All created placeholders will be added to this graph.
+            col_spliter (str): Column splitter in one line.
+        """
+        self.col_spliter = col_spliter
+        user_vocab, item_vocab, cate_vocab = (
+            hparams.user_vocab,
+            hparams.item_vocab,
+            hparams.cate_vocab,
+        )
+        self.userdict, self.itemdict, self.catedict = (
+            load_dict(user_vocab),
+            load_dict(item_vocab),
+            load_dict(cate_vocab),
+        )
+
+        self.max_seq_length = hparams.max_seq_length
+        self.batch_size = hparams.batch_size
+        self.iter_data = dict()
+
+        self.graph = graph
+        with self.graph.as_default():
+            self.labels = tf.compat.v1.placeholder(tf.float32, [None, 1], name="label")
+            self.users = tf.compat.v1.placeholder(tf.int32, [None], name="users")
+            self.items = tf.compat.v1.placeholder(tf.int32, [None], name="items")
+            self.cates = tf.compat.v1.placeholder(tf.int32, [None], name="cates")
+            self.item_history = tf.compat.v1.placeholder(
+                tf.int32, [None, self.max_seq_length], name="item_history"
+            )
+            self.item_cate_history = tf.compat.v1.placeholder(
+                tf.int32, [None, self.max_seq_length], name="item_cate_history"
+            )
+            self.mask = tf.compat.v1.placeholder(
+                tf.int32, [None, self.max_seq_length], name="mask"
+            )
+            self.time = tf.compat.v1.placeholder(tf.float32, [None], name="time")
+            self.time_diff = tf.compat.v1.placeholder(
+                tf.float32, [None, self.max_seq_length], name="time_diff"
+            )
+            self.time_from_first_action = tf.compat.v1.placeholder(
+                tf.float32, [None, self.max_seq_length], name="time_from_first_action"
+            )
+            self.time_to_now = tf.compat.v1.placeholder(
+                tf.float32, [None, self.max_seq_length], name="time_to_now"
+            )
[docs]    def parse_file(self, input_file):
+        """Parse the file into a list ready to be used for downstream tasks.
+
+        Args:
+            input_file: One of the train, valid or test files, which has not been parsed yet.
+
+        Returns:
+            list: A list with the parsing results.
+        """
+        with open(input_file, "r") as f:
+            lines = f.readlines()
+        res = []
+        for line in lines:
+            if not line:
+                continue
+            res.append(self.parser_one_line(line))
+        return res
+ +
[docs] def parser_one_line(self, line): + """Parse one string line into feature values. + + Args: + line (str): a string indicating one instance. + This string contains tab-separated values including: + label, user_hash, item_hash, item_cate, operation_time, item_history_sequence, + item_cate_history_sequence, and time_history_sequence. + + Returns: + list: Parsed results including `label`, `user_id`, `item_id`, `item_cate`, `item_history_sequence`, `cate_history_sequence`, + `current_time`, `time_diff`, `time_from_first_action`, `time_to_now`. + + """ + words = line.strip().split(self.col_spliter) + label = int(words[0]) + user_id = self.userdict[words[1]] if words[1] in self.userdict else 0 + item_id = self.itemdict[words[2]] if words[2] in self.itemdict else 0 + item_cate = self.catedict[words[3]] if words[3] in self.catedict else 0 + current_time = float(words[4]) + + item_history_sequence = [] + cate_history_sequence = [] + time_history_sequence = [] + + item_history_words = words[5].strip().split(",") + for item in item_history_words: + item_history_sequence.append( + self.itemdict[item] if item in self.itemdict else 0 + ) + + cate_history_words = words[6].strip().split(",") + for cate in cate_history_words: + cate_history_sequence.append( + self.catedict[cate] if cate in self.catedict else 0 + ) + + time_history_words = words[7].strip().split(",") + time_history_sequence = [float(i) for i in time_history_words] + + time_range = 3600 * 24 + + time_diff = [] + for i in range(len(time_history_sequence) - 1): + diff = ( + time_history_sequence[i + 1] - time_history_sequence[i] + ) / time_range + diff = max(diff, 0.5) + time_diff.append(diff) + last_diff = (current_time - time_history_sequence[-1]) / time_range + last_diff = max(last_diff, 0.5) + time_diff.append(last_diff) + time_diff = np.log(time_diff) + + time_from_first_action = [] + first_time = time_history_sequence[0] + time_from_first_action = [ + (t - first_time) / time_range for t in time_history_sequence[1:] + ] + time_from_first_action = [max(t, 0.5) for t in time_from_first_action] + last_diff = (current_time - first_time) / time_range + last_diff = max(last_diff, 0.5) + time_from_first_action.append(last_diff) + time_from_first_action = np.log(time_from_first_action) + + time_to_now = [] + time_to_now = [(current_time - t) / time_range for t in time_history_sequence] + time_to_now = [max(t, 0.5) for t in time_to_now] + time_to_now = np.log(time_to_now) + + return ( + label, + user_id, + item_id, + item_cate, + item_history_sequence, + cate_history_sequence, + current_time, + time_diff, + time_from_first_action, + time_to_now, + )
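To make the time features concrete, here is a hypothetical input line and the arithmetic the parser applies to it (all values are illustrative, not from a real dataset):

    # label  user      item     cate    op_time      item_history      cate_history    time_history
    line = "1\tuser_23\titem_78\tcate_4\t1617321600\titem_11,item_52\tcate_2,cate_4\t1617148800,1617235200"

    # With time_range = 3600 * 24 every gap is measured in days, floored at 0.5
    # and log-transformed, e.g. for the two history timestamps above:
    #   diff = max((1617235200 - 1617148800) / 86400, 0.5) = 1.0  ->  log(1.0) = 0.0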
+ +
[docs]    def load_data_from_file(self, infile, batch_num_ngs=0, min_seq_length=1):
+        """Read and parse data from a file.
+
+        Args:
+            infile (str): Text input file. Each line in this file is an instance.
+            batch_num_ngs (int): The number of negative samples to draw per instance in a batch;
+                0 means no negative sampling is needed here.
+            min_seq_length (int): The minimum sequence length.
+                Sequences shorter than min_seq_length will be ignored.
+
+        Yields:
+            object: An iterator that yields parsed results, in the format of graph `feed_dict`.
+        """
+        label_list = []
+        user_list = []
+        item_list = []
+        item_cate_list = []
+        item_history_batch = []
+        item_cate_history_batch = []
+        time_list = []
+        time_diff_list = []
+        time_from_first_action_list = []
+        time_to_now_list = []
+
+        cnt = 0
+
+        if infile not in self.iter_data:
+            lines = self.parse_file(infile)
+            self.iter_data[infile] = lines
+        else:
+            lines = self.iter_data[infile]
+
+        if batch_num_ngs > 0:
+            random.shuffle(lines)
+
+        for line in lines:
+            if not line:
+                continue
+
+            (
+                label,
+                user_id,
+                item_id,
+                item_cate,
+                item_history_sequence,
+                item_cate_history_sequence,
+                current_time,
+                time_diff,
+                time_from_first_action,
+                time_to_now,
+            ) = line
+            if len(item_history_sequence) < min_seq_length:
+                continue
+
+            label_list.append(label)
+            user_list.append(user_id)
+            item_list.append(item_id)
+            item_cate_list.append(item_cate)
+            item_history_batch.append(item_history_sequence)
+            item_cate_history_batch.append(item_cate_history_sequence)
+            time_list.append(current_time)
+            time_diff_list.append(time_diff)
+            time_from_first_action_list.append(time_from_first_action)
+            time_to_now_list.append(time_to_now)
+
+            cnt += 1
+            if cnt == self.batch_size:
+                res = self._convert_data(
+                    label_list,
+                    user_list,
+                    item_list,
+                    item_cate_list,
+                    item_history_batch,
+                    item_cate_history_batch,
+                    time_list,
+                    time_diff_list,
+                    time_from_first_action_list,
+                    time_to_now_list,
+                    batch_num_ngs,
+                )
+                batch_input = self.gen_feed_dict(res)
+                yield batch_input if batch_input else None
+                label_list = []
+                user_list = []
+                item_list = []
+                item_cate_list = []
+                item_history_batch = []
+                item_cate_history_batch = []
+                time_list = []
+                time_diff_list = []
+                time_from_first_action_list = []
+                time_to_now_list = []
+                cnt = 0
+        if cnt > 0:
+            res = self._convert_data(
+                label_list,
+                user_list,
+                item_list,
+                item_cate_list,
+                item_history_batch,
+                item_cate_history_batch,
+                time_list,
+                time_diff_list,
+                time_from_first_action_list,
+                time_to_now_list,
+                batch_num_ngs,
+            )
+            batch_input = self.gen_feed_dict(res)
+            yield batch_input if batch_input else None
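A minimal usage sketch of the iterator (the graph wiring follows the constructor above; the file path and the `hparams` object are assumptions):

    graph = tf.Graph()
    iterator = SequentialIterator(hparams, graph)  # hparams assumed to define the vocab files etc.
    # Each yielded value is a feed_dict for the placeholders created in __init__;
    # batch_num_ngs=4 also draws 4 in-batch negatives per positive during training.
    for feed_dict in iterator.load_data_from_file("train_data.tsv", batch_num_ngs=4):
        pass  # feed the batch into a model; see gen_feed_dict below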
+ + def _convert_data( + self, + label_list, + user_list, + item_list, + item_cate_list, + item_history_batch, + item_cate_history_batch, + time_list, + time_diff_list, + time_from_first_action_list, + time_to_now_list, + batch_num_ngs, + ): + """Convert data into numpy arrays that are good for further model operation. + + Args: + label_list (list): A list of ground-truth labels. + user_list (list): A list of user indexes. + item_list (list): A list of item indexes. + item_cate_list (list): A list of category indexes. + item_history_batch (list): A list of item history indexes. + item_cate_history_batch (list): A list of category history indexes. + time_list (list): A list of current timestamp. + time_diff_list (list): A list of timestamp between each sequential operations. + time_from_first_action_list (list): A list of timestamp from the first operation. + time_to_now_list (list): A list of timestamp to the current time. + batch_num_ngs (int): The number of negative sampling while training in mini-batch. + + Returns: + dict: A dictionary, containing multiple numpy arrays that are convenient for further operation. + """ + if batch_num_ngs: + instance_cnt = len(label_list) + if instance_cnt < 5: + return + + label_list_all = [] + item_list_all = [] + item_cate_list_all = [] + user_list_all = np.asarray( + [[user] * (batch_num_ngs + 1) for user in user_list], dtype=np.int32 + ).flatten() + time_list_all = np.asarray( + [[t] * (batch_num_ngs + 1) for t in time_list], dtype=np.float32 + ).flatten() + + history_lengths = [len(item_history_batch[i]) for i in range(instance_cnt)] + max_seq_length_batch = self.max_seq_length + item_history_batch_all = np.zeros( + (instance_cnt * (batch_num_ngs + 1), max_seq_length_batch) + ).astype("int32") + item_cate_history_batch_all = np.zeros( + (instance_cnt * (batch_num_ngs + 1), max_seq_length_batch) + ).astype("int32") + time_diff_batch = np.zeros( + (instance_cnt * (batch_num_ngs + 1), max_seq_length_batch) + ).astype("float32") + time_from_first_action_batch = np.zeros( + (instance_cnt * (batch_num_ngs + 1), max_seq_length_batch) + ).astype("float32") + time_to_now_batch = np.zeros( + (instance_cnt * (batch_num_ngs + 1), max_seq_length_batch) + ).astype("float32") + mask = np.zeros( + (instance_cnt * (1 + batch_num_ngs), max_seq_length_batch) + ).astype("float32") + + for i in range(instance_cnt): + this_length = min(history_lengths[i], max_seq_length_batch) + for index in range(batch_num_ngs + 1): + item_history_batch_all[ + i * (batch_num_ngs + 1) + index, :this_length + ] = np.asarray(item_history_batch[i][-this_length:], dtype=np.int32) + item_cate_history_batch_all[ + i * (batch_num_ngs + 1) + index, :this_length + ] = np.asarray( + item_cate_history_batch[i][-this_length:], dtype=np.int32 + ) + mask[i * (batch_num_ngs + 1) + index, :this_length] = 1.0 + time_diff_batch[ + i * (batch_num_ngs + 1) + index, :this_length + ] = np.asarray(time_diff_list[i][-this_length:], dtype=np.float32) + time_from_first_action_batch[ + i * (batch_num_ngs + 1) + index, :this_length + ] = np.asarray( + time_from_first_action_list[i][-this_length:], dtype=np.float32 + ) + time_to_now_batch[ + i * (batch_num_ngs + 1) + index, :this_length + ] = np.asarray(time_to_now_list[i][-this_length:], dtype=np.float32) + + for i in range(instance_cnt): + positive_item = item_list[i] + label_list_all.append(1) + item_list_all.append(positive_item) + item_cate_list_all.append(item_cate_list[i]) + count = 0 + while batch_num_ngs: + random_value = random.randint(0, instance_cnt - 1) 
+ negative_item = item_list[random_value] + if negative_item == positive_item: + continue + label_list_all.append(0) + item_list_all.append(negative_item) + item_cate_list_all.append(item_cate_list[random_value]) + count += 1 + if count == batch_num_ngs: + break + + res = {} + res["labels"] = np.asarray(label_list_all, dtype=np.float32).reshape(-1, 1) + res["users"] = user_list_all + res["items"] = np.asarray(item_list_all, dtype=np.int32) + res["cates"] = np.asarray(item_cate_list_all, dtype=np.int32) + res["item_history"] = item_history_batch_all + res["item_cate_history"] = item_cate_history_batch_all + res["mask"] = mask + res["time"] = time_list_all + res["time_diff"] = time_diff_batch + res["time_from_first_action"] = time_from_first_action_batch + res["time_to_now"] = time_to_now_batch + return res + + else: + instance_cnt = len(label_list) + history_lengths = [len(item_history_batch[i]) for i in range(instance_cnt)] + max_seq_length_batch = self.max_seq_length + item_history_batch_all = np.zeros( + (instance_cnt, max_seq_length_batch) + ).astype("int32") + item_cate_history_batch_all = np.zeros( + (instance_cnt, max_seq_length_batch) + ).astype("int32") + time_diff_batch = np.zeros((instance_cnt, max_seq_length_batch)).astype( + "float32" + ) + time_from_first_action_batch = np.zeros( + (instance_cnt, max_seq_length_batch) + ).astype("float32") + time_to_now_batch = np.zeros((instance_cnt, max_seq_length_batch)).astype( + "float32" + ) + mask = np.zeros((instance_cnt, max_seq_length_batch)).astype("float32") + + for i in range(instance_cnt): + this_length = min(history_lengths[i], max_seq_length_batch) + item_history_batch_all[i, :this_length] = item_history_batch[i][ + -this_length: + ] + item_cate_history_batch_all[i, :this_length] = item_cate_history_batch[ + i + ][-this_length:] + mask[i, :this_length] = 1.0 + time_diff_batch[i, :this_length] = time_diff_list[i][-this_length:] + time_from_first_action_batch[ + i, :this_length + ] = time_from_first_action_list[i][-this_length:] + time_to_now_batch[i, :this_length] = time_to_now_list[i][-this_length:] + + res = {} + res["labels"] = np.asarray(label_list, dtype=np.float32).reshape(-1, 1) + res["users"] = np.asarray(user_list, dtype=np.float32) + res["items"] = np.asarray(item_list, dtype=np.int32) + res["cates"] = np.asarray(item_cate_list, dtype=np.int32) + res["item_history"] = item_history_batch_all + res["item_cate_history"] = item_cate_history_batch_all + res["mask"] = mask + res["time"] = np.asarray(time_list, dtype=np.float32) + res["time_diff"] = time_diff_batch + res["time_from_first_action"] = time_from_first_action_batch + res["time_to_now"] = time_to_now_batch + return res + +
[docs] def gen_feed_dict(self, data_dict): + """Construct a dictionary that maps graph elements to values. + + Args: + data_dict (dict): A dictionary that maps string name to numpy arrays. + + Returns: + dict: A dictionary that maps graph elements to numpy arrays. + + """ + if not data_dict: + return dict() + feed_dict = { + self.labels: data_dict["labels"], + self.users: data_dict["users"], + self.items: data_dict["items"], + self.cates: data_dict["cates"], + self.item_history: data_dict["item_history"], + self.item_cate_history: data_dict["item_cate_history"], + self.mask: data_dict["mask"], + self.time: data_dict["time"], + self.time_diff: data_dict["time_diff"], + self.time_from_first_action: data_dict["time_from_first_action"], + self.time_to_now: data_dict["time_to_now"], + } + return feed_dict
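Continuing that sketch, a model built on the same graph would consume the dictionaries like this (the `model` object and its `pred` tensor are assumptions, standing in for any deeprec model wired to this iterator):

    with tf.compat.v1.Session(graph=graph) as sess:
        for feed_dict in iterator.load_data_from_file("valid_data.tsv"):
            if feed_dict:  # load_data_from_file yields None when a batch comes back empty
                scores = sess.run(model.pred, feed_dict=feed_dict)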
+
\ No newline at end of file
diff --git a/_modules/recommenders/models/deeprec/models/base_model.html b/_modules/recommenders/models/deeprec/models/base_model.html
new file mode 100644
index 0000000000..b0110802d6
--- /dev/null
+++ b/_modules/recommenders/models/deeprec/models/base_model.html
@@ -0,0 +1,1124 @@
+recommenders.models.deeprec.models.base_model — Recommenders documentation
Source code for recommenders.models.deeprec.models.base_model

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+from os.path import join
+import abc
+import time
+import os
+import numpy as np
+import tensorflow as tf
+from recommenders.models.deeprec.deeprec_utils import cal_metric
+
+
+tf.compat.v1.disable_eager_execution()
+__all__ = ["BaseModel"]
+
+
+
[docs]class BaseModel:
+    """Base class for models"""
+
+    def __init__(self, hparams, iterator_creator, graph=None, seed=None):
+        """Initialize the model. Create the common logic needed by all deeprec models,
+        such as the loss function and parameter set.
+
+        Args:
+            hparams (object): An `HParams` object that holds the entire set of hyperparameters.
+            iterator_creator (object): An iterator to load the data.
+            graph (object): An optional graph.
+            seed (int): Random seed.
+        """
+        self.seed = seed
+        tf.compat.v1.set_random_seed(seed)
+        np.random.seed(seed)
+
+        self.graph = graph if graph is not None else tf.Graph()
+        self.iterator = iterator_creator(hparams, self.graph)
+        self.train_num_ngs = (
+            hparams.train_num_ngs if "train_num_ngs" in hparams.values() else None
+        )
+
+        with self.graph.as_default():
+            self.hparams = hparams
+
+            self.layer_params = []
+            self.embed_params = []
+            self.cross_params = []
+            self.layer_keeps = tf.compat.v1.placeholder(tf.float32, name="layer_keeps")
+            self.keep_prob_train = None
+            self.keep_prob_test = None
+            self.is_train_stage = tf.compat.v1.placeholder(
+                tf.bool, shape=(), name="is_training"
+            )
+            self.group = tf.compat.v1.placeholder(tf.int32, shape=(), name="group")
+
+            self.initializer = self._get_initializer()
+
+            self.logit = self._build_graph()
+            self.pred = self._get_pred(self.logit, self.hparams.method)
+
+            self.loss = self._get_loss()
+            self.saver = tf.compat.v1.train.Saver(max_to_keep=self.hparams.epochs)
+            self.update = self._build_train_opt()
+            self.extra_update_ops = tf.compat.v1.get_collection(
+                tf.compat.v1.GraphKeys.UPDATE_OPS
+            )
+            self.init_op = tf.compat.v1.global_variables_initializer()
+            self.merged = self._add_summaries()
+
+        # set GPU use with on demand growth
+        gpu_options = tf.compat.v1.GPUOptions(allow_growth=True)
+        self.sess = tf.compat.v1.Session(
+            graph=self.graph, config=tf.compat.v1.ConfigProto(gpu_options=gpu_options)
+        )
+        self.sess.run(self.init_op)
+
+    @abc.abstractmethod
+    def _build_graph(self):
+        """Subclass will implement this."""
+        pass
+
+    def _get_loss(self):
+        """Make the loss function, consisting of data loss and regularization loss.
+
+        Returns:
+            object: Loss value.
+        """
+        self.data_loss = self._compute_data_loss()
+        self.regular_loss = self._compute_regular_loss()
+        self.loss = tf.add(self.data_loss, self.regular_loss)
+        return self.loss
+
+    def _get_pred(self, logit, task):
+        """Make the final output a prediction score, according to the task.
+
+        Args:
+            logit (object): Base prediction value.
+            task (str): The task type (values: regression/classification).
+
+        Returns:
+            object: Transformed score.
+ """ + if task == "regression": + pred = tf.identity(logit) + elif task == "classification": + pred = tf.sigmoid(logit) + else: + raise ValueError( + "method must be regression or classification, but now is {0}".format( + task + ) + ) + pred = tf.identity(pred, name="pred") + return pred + + def _add_summaries(self): + tf.compat.v1.summary.scalar("data_loss", self.data_loss) + tf.compat.v1.summary.scalar("regular_loss", self.regular_loss) + tf.compat.v1.summary.scalar("loss", self.loss) + merged = tf.compat.v1.summary.merge_all() + return merged + + def _l2_loss(self): + l2_loss = tf.zeros([1], dtype=tf.float32) + # embedding_layer l2 loss + for param in self.embed_params: + l2_loss = tf.add( + l2_loss, tf.multiply(self.hparams.embed_l2, tf.nn.l2_loss(param)) + ) + params = self.layer_params + for param in params: + l2_loss = tf.add( + l2_loss, tf.multiply(self.hparams.layer_l2, tf.nn.l2_loss(param)) + ) + return l2_loss + + def _l1_loss(self): + l1_loss = tf.zeros([1], dtype=tf.float32) + # embedding_layer l2 loss + for param in self.embed_params: + l1_loss = tf.add( + l1_loss, + tf.multiply(self.hparams.embed_l1, tf.norm(tensor=param, ord=1)), + ) + params = self.layer_params + for param in params: + l1_loss = tf.add( + l1_loss, + tf.multiply(self.hparams.layer_l1, tf.norm(tensor=param, ord=1)), + ) + return l1_loss + + def _cross_l_loss(self): + """Construct L1-norm and L2-norm on cross network parameters for loss function. + + Returns: + object: Regular loss value on cross network parameters. + """ + cross_l_loss = tf.zeros([1], dtype=tf.float32) + for param in self.cross_params: + cross_l_loss = tf.add( + cross_l_loss, + tf.multiply(self.hparams.cross_l1, tf.norm(tensor=param, ord=1)), + ) + cross_l_loss = tf.add( + cross_l_loss, + tf.multiply(self.hparams.cross_l2, tf.norm(tensor=param, ord=2)), + ) + return cross_l_loss + + def _get_initializer(self): + if self.hparams.init_method == "tnormal": + return tf.compat.v1.truncated_normal_initializer( + stddev=self.hparams.init_value, seed=self.seed + ) + elif self.hparams.init_method == "uniform": + return tf.compat.v1.random_uniform_initializer( + -self.hparams.init_value, self.hparams.init_value, seed=self.seed + ) + elif self.hparams.init_method == "normal": + return tf.compat.v1.random_normal_initializer( + stddev=self.hparams.init_value, seed=self.seed + ) + elif self.hparams.init_method == "xavier_normal": + return tf.compat.v1.keras.initializers.VarianceScaling( + scale=1.0, + mode="fan_avg", + distribution=("uniform" if False else "truncated_normal"), + seed=self.seed, + ) + elif self.hparams.init_method == "xavier_uniform": + return tf.compat.v1.keras.initializers.VarianceScaling( + scale=1.0, + mode="fan_avg", + distribution=("uniform" if True else "truncated_normal"), + seed=self.seed, + ) + elif self.hparams.init_method == "he_normal": + return tf.compat.v1.keras.initializers.VarianceScaling( + scale=2.0, + mode=("FAN_IN").lower(), + distribution=("uniform" if False else "truncated_normal"), + seed=self.seed, + ) + elif self.hparams.init_method == "he_uniform": + return tf.compat.v1.keras.initializers.VarianceScaling( + scale=2.0, + mode=("FAN_IN").lower(), + distribution=("uniform" if True else "truncated_normal"), + seed=self.seed, + ) + else: + return tf.compat.v1.truncated_normal_initializer( + stddev=self.hparams.init_value, seed=self.seed + ) + + def _compute_data_loss(self): + if self.hparams.loss == "cross_entropy_loss": + data_loss = tf.reduce_mean( + input_tensor=tf.nn.sigmoid_cross_entropy_with_logits( + 
logits=tf.reshape(self.logit, [-1]),
+                    labels=tf.reshape(self.iterator.labels, [-1]),
+                )
+            )
+        elif self.hparams.loss == "square_loss":
+            data_loss = tf.sqrt(
+                tf.reduce_mean(
+                    input_tensor=tf.math.squared_difference(
+                        tf.reshape(self.pred, [-1]),
+                        tf.reshape(self.iterator.labels, [-1]),
+                    )
+                )
+            )
+        elif self.hparams.loss == "log_loss":
+            data_loss = tf.reduce_mean(
+                input_tensor=tf.compat.v1.losses.log_loss(
+                    predictions=tf.reshape(self.pred, [-1]),
+                    labels=tf.reshape(self.iterator.labels, [-1]),
+                )
+            )
+        elif self.hparams.loss == "softmax":
+            group = self.train_num_ngs + 1
+            logits = tf.reshape(self.logit, (-1, group))
+            if self.hparams.model_type == "NextItNet":
+                labels = (
+                    tf.transpose(
+                        a=tf.reshape(
+                            self.iterator.labels,
+                            (-1, group, self.hparams.max_seq_length),
+                        ),
+                        perm=[0, 2, 1],
+                    ),
+                )
+                labels = tf.reshape(labels, (-1, group))
+            else:
+                labels = tf.reshape(self.iterator.labels, (-1, group))
+            softmax_pred = tf.nn.softmax(logits, axis=-1)
+            boolean_mask = tf.equal(labels, tf.ones_like(labels))
+            mask_paddings = tf.ones_like(softmax_pred)
+            pos_softmax = tf.compat.v1.where(boolean_mask, softmax_pred, mask_paddings)
+            data_loss = -group * tf.reduce_mean(input_tensor=tf.math.log(pos_softmax))
+        else:
+            raise ValueError("this loss is not defined {0}".format(self.hparams.loss))
+        return data_loss
+
+    def _compute_regular_loss(self):
+        """Construct the regularization loss, usually comprised of L1 and L2 norms.
+        Users can designate which norms to include via the config file.
+
+        Returns:
+            object: Regularization loss.
+        """
+        regular_loss = self._l2_loss() + self._l1_loss() + self._cross_l_loss()
+        return tf.reduce_sum(input_tensor=regular_loss)
+
+    def _train_opt(self):
+        """Get the optimizer according to the configuration. Usually we will use Adam.
+
+        Returns:
+            object: An optimizer.
+        """
+        lr = self.hparams.learning_rate
+        optimizer = self.hparams.optimizer
+
+        if optimizer == "adadelta":
+            train_step = tf.compat.v1.train.AdadeltaOptimizer(lr)
+        elif optimizer == "adagrad":
+            train_step = tf.compat.v1.train.AdagradOptimizer(lr)
+        elif optimizer == "sgd":
+            train_step = tf.compat.v1.train.GradientDescentOptimizer(lr)
+        elif optimizer == "adam":
+            train_step = tf.compat.v1.train.AdamOptimizer(lr)
+        elif optimizer == "ftrl":
+            train_step = tf.compat.v1.train.FtrlOptimizer(lr)
+        elif optimizer == "gd":
+            train_step = tf.compat.v1.train.GradientDescentOptimizer(lr)
+        elif optimizer == "padagrad":
+            train_step = tf.compat.v1.train.ProximalAdagradOptimizer(lr)
+        elif optimizer == "pgd":
+            train_step = tf.compat.v1.train.ProximalGradientDescentOptimizer(lr)
+        elif optimizer == "rmsprop":
+            train_step = tf.compat.v1.train.RMSPropOptimizer(lr)
+        else:
+            train_step = tf.compat.v1.train.GradientDescentOptimizer(lr)
+        return train_step
+
+    def _build_train_opt(self):
+        """Construct the gradient-descent-based optimization step.
+        In this step we provide a gradient clipping option; sometimes we want to clip the gradients
+        when their absolute values are too large, to avoid gradient explosion.
+
+        Returns:
+            object: An operation that applies the specified optimization step.
+        """
+        train_step = self._train_opt()
+        gradients, variables = zip(*train_step.compute_gradients(self.loss))
+        if self.hparams.is_clip_norm:
+            gradients = [
+                None
+                if gradient is None
+                else tf.clip_by_norm(gradient, self.hparams.max_grad_norm)
+                for gradient in gradients
+            ]
+        return train_step.apply_gradients(zip(gradients, variables))
+
+    def _active_layer(self, logit, activation, layer_idx=-1):
+        """Transform the input value with an activation. May use dropout.
+
+        Args:
+            logit (object): Input value.
+            activation (str): A string indicating the type of activation function.
+            layer_idx (int): Index of the current layer. Used to retrieve corresponding parameters.
+
+        Returns:
+            object: A tensor after applying the activation function on logit.
+        """
+        if layer_idx >= 0 and self.hparams.user_dropout:
+            logit = self._dropout(logit, self.layer_keeps[layer_idx])
+        return self._activate(logit, activation)
+
+    def _activate(self, logit, activation):
+        if activation == "sigmoid":
+            return tf.nn.sigmoid(logit)
+        elif activation == "softmax":
+            return tf.nn.softmax(logit)
+        elif activation == "relu":
+            return tf.nn.relu(logit)
+        elif activation == "tanh":
+            return tf.nn.tanh(logit)
+        elif activation == "elu":
+            return tf.nn.elu(logit)
+        elif activation == "identity":
+            return tf.identity(logit)
+        else:
+            raise ValueError("this activation is not defined {0}".format(activation))
+
+    def _dropout(self, logit, keep_prob):
+        """Apply dropout to the input value.
+
+        Args:
+            logit (object): The input value.
+            keep_prob (float): The probability of keeping each element.
+
+        Returns:
+            object: A tensor of the same shape as logit.
+        """
+        return tf.nn.dropout(x=logit, rate=1 - (keep_prob))
+
[docs]    def train(self, sess, feed_dict):
+        """Go through the optimization step once with the training data in `feed_dict`.
+
+        Args:
+            sess (object): The model session object.
+            feed_dict (dict): Feed values to train the model. This is a dictionary that maps graph elements to values.
+
+        Returns:
+            list: A list of values: the update operation, extra update ops, total loss, data loss, and merged summary.
+        """
+        feed_dict[self.layer_keeps] = self.keep_prob_train
+        feed_dict[self.is_train_stage] = True
+        return sess.run(
+            [
+                self.update,
+                self.extra_update_ops,
+                self.loss,
+                self.data_loss,
+                self.merged,
+            ],
+            feed_dict=feed_dict,
+        )
+ +
[docs] def eval(self, sess, feed_dict): + """Evaluate the data in `feed_dict` with current model. + + Args: + sess (object): The model session object. + feed_dict (dict): Feed values for evaluation. This is a dictionary that maps graph elements to values. + + Returns: + list: A list of evaluated results, including total loss value, data loss value, predicted scores, and ground-truth labels. + """ + feed_dict[self.layer_keeps] = self.keep_prob_test + feed_dict[self.is_train_stage] = False + return sess.run([self.pred, self.iterator.labels], feed_dict=feed_dict)
+ +
[docs] def infer(self, sess, feed_dict): + """Given feature data (in `feed_dict`), get predicted scores with current model. + + Args: + sess (object): The model session object. + feed_dict (dict): Instances to predict. This is a dictionary that maps graph elements to values. + + Returns: + list: Predicted scores for the given instances. + """ + feed_dict[self.layer_keeps] = self.keep_prob_test + feed_dict[self.is_train_stage] = False + return sess.run([self.pred], feed_dict=feed_dict)
+ +
[docs]    def load_model(self, model_path=None):
+        """Load an existing model.
+
+        Args:
+            model_path (str): Model path; defaults to `hparams.load_saved_model` when not given.
+
+        Raises:
+            IOError: If the restore operation fails.
+        """
+        act_path = self.hparams.load_saved_model
+        if model_path is not None:
+            act_path = model_path
+
+        try:
+            self.saver.restore(self.sess, act_path)
+        except Exception:
+            raise IOError("Failed to find any matching files for {0}".format(act_path))
+ +
[docs] def fit(self, train_file, valid_file, test_file=None): + """Fit the model with `train_file`. Evaluate the model on valid_file per epoch to observe the training status. + If `test_file` is not None, evaluate it too. + + Args: + train_file (str): training data set. + valid_file (str): validation set. + test_file (str): test set. + + Returns: + object: An instance of self. + """ + if self.hparams.write_tfevents: + self.writer = tf.compat.v1.summary.FileWriter( + self.hparams.SUMMARIES_DIR, self.sess.graph + ) + + train_sess = self.sess + for epoch in range(1, self.hparams.epochs + 1): + step = 0 + self.hparams.current_epoch = epoch + + epoch_loss = 0 + train_start = time.time() + for ( + batch_data_input, + impression, + data_size, + ) in self.iterator.load_data_from_file(train_file): + step_result = self.train(train_sess, batch_data_input) + (_, _, step_loss, step_data_loss, summary) = step_result + if self.hparams.write_tfevents: + self.writer.add_summary(summary, step) + epoch_loss += step_loss + step += 1 + if step % self.hparams.show_step == 0: + print( + "step {0:d} , total_loss: {1:.4f}, data_loss: {2:.4f}".format( + step, step_loss, step_data_loss + ) + ) + + train_end = time.time() + train_time = train_end - train_start + + if self.hparams.save_model: + if not os.path.exists(self.hparams.MODEL_DIR): + os.makedirs(self.hparams.MODEL_DIR) + if epoch % self.hparams.save_epoch == 0: + save_path_str = join(self.hparams.MODEL_DIR, "epoch_" + str(epoch)) + self.saver.save(sess=train_sess, save_path=save_path_str) + + eval_start = time.time() + eval_res = self.run_eval(valid_file) + train_info = ",".join( + [ + str(item[0]) + ":" + str(item[1]) + for item in [("logloss loss", epoch_loss / step)] + ] + ) + eval_info = ", ".join( + [ + str(item[0]) + ":" + str(item[1]) + for item in sorted(eval_res.items(), key=lambda x: x[0]) + ] + ) + if test_file is not None: + test_res = self.run_eval(test_file) + test_info = ", ".join( + [ + str(item[0]) + ":" + str(item[1]) + for item in sorted(test_res.items(), key=lambda x: x[0]) + ] + ) + eval_end = time.time() + eval_time = eval_end - eval_start + + if test_file is not None: + print( + "at epoch {0:d}".format(epoch) + + "\ntrain info: " + + train_info + + "\neval info: " + + eval_info + + "\ntest info: " + + test_info + ) + else: + print( + "at epoch {0:d}".format(epoch) + + "\ntrain info: " + + train_info + + "\neval info: " + + eval_info + ) + print( + "at epoch {0:d} , train time: {1:.1f} eval time: {2:.1f}".format( + epoch, train_time, eval_time + ) + ) + + if self.hparams.write_tfevents: + self.writer.close() + + return self
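A hedged usage sketch of the training loop above (the model class and file paths are placeholders; any `BaseModel` subclass with a matching iterator would do):

    # model = SomeDeeprecModel(hparams, SomeIterator)   # a BaseModel subclass
    # model = model.fit("train.tsv", "valid.tsv", test_file="test.tsv")
    # per-epoch train/eval info is printed, and checkpoints land in hparams.MODEL_DIR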
+ +
[docs]    def group_labels(self, labels, preds, group_keys):
+        """Divide `labels` and `preds` into several groups according to the values in the group keys.
+
+        Args:
+            labels (list): Ground-truth label list.
+            preds (list): Prediction score list.
+            group_keys (list): Group key list.
+
+        Returns:
+            list, list:
+            - Labels after grouping.
+            - Predictions after grouping.
+        """
+        all_keys = list(set(group_keys))
+        group_labels = {k: [] for k in all_keys}
+        group_preds = {k: [] for k in all_keys}
+        for label, p, k in zip(labels, preds, group_keys):
+            group_labels[k].append(label)
+            group_preds[k].append(p)
+        all_labels = []
+        all_preds = []
+        for k in all_keys:
+            all_labels.append(group_labels[k])
+            all_preds.append(group_preds[k])
+        return all_labels, all_preds
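A small worked example of the grouping (values are illustrative; note that `list(set(...))` leaves the group order unspecified):

    labels     = [1, 0, 1, 0]
    preds      = [0.9, 0.2, 0.6, 0.4]
    group_keys = ["u1", "u2", "u1", "u2"]
    # group_labels(labels, preds, group_keys) may return
    #   ([[1, 1], [0, 0]], [[0.9, 0.6], [0.2, 0.4]])
    # i.e. one label list and one prediction list per distinct key.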
+ +
[docs]    def run_eval(self, filename):
+        """Evaluate the given file and return some evaluation metrics.
+
+        Args:
+            filename (str): A file name that will be evaluated.
+
+        Returns:
+            dict: A dictionary that contains evaluation metrics.
+        """
+        load_sess = self.sess
+        preds = []
+        labels = []
+        imp_indexs = []
+        for batch_data_input, imp_index, data_size in self.iterator.load_data_from_file(
+            filename
+        ):
+            step_pred, step_labels = self.eval(load_sess, batch_data_input)
+            preds.extend(np.reshape(step_pred, -1))
+            labels.extend(np.reshape(step_labels, -1))
+            imp_indexs.extend(np.reshape(imp_index, -1))
+        res = cal_metric(labels, preds, self.hparams.metrics)
+        if "pairwise_metrics" in self.hparams.values():
+            group_labels, group_preds = self.group_labels(labels, preds, imp_indexs)
+            res_pairwise = cal_metric(
+                group_labels, group_preds, self.hparams.pairwise_metrics
+            )
+            res.update(res_pairwise)
+        return res
+ +
[docs]    def predict(self, infile_name, outfile_name):
+        """Make predictions on the given data, and output the predicted scores to a file.
+
+        Args:
+            infile_name (str): Input file name; the format is the same as the train/val/test files.
+            outfile_name (str): Output file name; each line is one predicted score.
+
+        Returns:
+            object: An instance of self.
+        """
+        load_sess = self.sess
+        with tf.io.gfile.GFile(outfile_name, "w") as wt:
+            for batch_data_input, _, data_size in self.iterator.load_data_from_file(
+                infile_name
+            ):
+                step_pred = self.infer(load_sess, batch_data_input)
+                step_pred = step_pred[0][:data_size]
+                step_pred = np.reshape(step_pred, -1)
+                wt.write("\n".join(map(str, step_pred)))
+                # line break after each batch.
+                wt.write("\n")
+        return self
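For example (paths are hypothetical):

    # model.predict("test.tsv", "predictions.txt")
    # -> predictions.txt holds one score per line, written batch by batch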
+ + def _attention(self, inputs, attention_size): + """Soft alignment attention implement. + + Args: + inputs (object): Sequences ready to apply attention. + attention_size (int): The dimension of attention operation. + + Returns: + object: Weighted sum after attention. + """ + hidden_size = inputs.shape[2] + if not attention_size: + attention_size = hidden_size + + attention_mat = tf.compat.v1.get_variable( + name="attention_mat", + shape=[inputs.shape[-1], hidden_size], + initializer=self.initializer, + ) + att_inputs = tf.tensordot(inputs, attention_mat, [[2], [0]]) + + query = tf.compat.v1.get_variable( + name="query", + shape=[attention_size], + dtype=tf.float32, + initializer=self.initializer, + ) + att_logits = tf.tensordot(att_inputs, query, axes=1, name="att_logits") + att_weights = tf.nn.softmax(att_logits, name="att_weights") + output = inputs * tf.expand_dims(att_weights, -1) + return output + + def _fcn_net(self, model_output, layer_sizes, scope): + """Construct the MLP part for the model. + + Args: + model_output (object): The output of upper layers, input of MLP part + layer_sizes (list): The shape of each layer of MLP part + scope (object): The scope of MLP part + + Returns: + object: Prediction logit after fully connected layer. + """ + hparams = self.hparams + with tf.compat.v1.variable_scope(scope): + last_layer_size = model_output.shape[-1] + layer_idx = 0 + hidden_nn_layers = [] + hidden_nn_layers.append(model_output) + with tf.compat.v1.variable_scope( + "nn_part", initializer=self.initializer + ) as scope: + for idx, layer_size in enumerate(layer_sizes): + curr_w_nn_layer = tf.compat.v1.get_variable( + name="w_nn_layer" + str(layer_idx), + shape=[last_layer_size, layer_size], + dtype=tf.float32, + ) + curr_b_nn_layer = tf.compat.v1.get_variable( + name="b_nn_layer" + str(layer_idx), + shape=[layer_size], + dtype=tf.float32, + initializer=tf.compat.v1.zeros_initializer(), + ) + tf.compat.v1.summary.histogram( + "nn_part/" + "w_nn_layer" + str(layer_idx), curr_w_nn_layer + ) + tf.compat.v1.summary.histogram( + "nn_part/" + "b_nn_layer" + str(layer_idx), curr_b_nn_layer + ) + curr_hidden_nn_layer = ( + tf.tensordot( + hidden_nn_layers[layer_idx], curr_w_nn_layer, axes=1 + ) + + curr_b_nn_layer + ) + + scope = "nn_part" + str(idx) + activation = hparams.activation[idx] + + if hparams.enable_BN is True: + curr_hidden_nn_layer = tf.compat.v1.layers.batch_normalization( + curr_hidden_nn_layer, + momentum=0.95, + epsilon=0.0001, + training=self.is_train_stage, + ) + + curr_hidden_nn_layer = self._active_layer( + logit=curr_hidden_nn_layer, activation=activation, layer_idx=idx + ) + hidden_nn_layers.append(curr_hidden_nn_layer) + layer_idx += 1 + last_layer_size = layer_size + + w_nn_output = tf.compat.v1.get_variable( + name="w_nn_output", shape=[last_layer_size, 1], dtype=tf.float32 + ) + b_nn_output = tf.compat.v1.get_variable( + name="b_nn_output", + shape=[1], + dtype=tf.float32, + initializer=tf.compat.v1.zeros_initializer(), + ) + tf.compat.v1.summary.histogram( + "nn_part/" + "w_nn_output" + str(layer_idx), w_nn_output + ) + tf.compat.v1.summary.histogram( + "nn_part/" + "b_nn_output" + str(layer_idx), b_nn_output + ) + nn_output = ( + tf.tensordot(hidden_nn_layers[-1], w_nn_output, axes=1) + + b_nn_output + ) + self.logit = nn_output + return nn_output
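As a shape sanity check for `_fcn_net` (sizes are illustrative):

    # model_output: (batch, 64), layer_sizes = [32, 16]
    #   hidden_1: (batch, 32) -> hidden_2: (batch, 16) -> nn_output: (batch, 1)
    # nn_output then becomes the model logit.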
+
\ No newline at end of file
diff --git a/_modules/recommenders/models/deeprec/models/dkn.html b/_modules/recommenders/models/deeprec/models/dkn.html
new file mode 100644
index 0000000000..023369a1a3
--- /dev/null
+++ b/_modules/recommenders/models/deeprec/models/dkn.html
@@ -0,0 +1,875 @@
+recommenders.models.deeprec.models.dkn — Recommenders documentation
Source code for recommenders.models.deeprec.models.dkn

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import numpy as np
+import tensorflow as tf
+
+from recommenders.models.deeprec.models.base_model import BaseModel
+
+__all__ = ["DKN"]
+
+
+
[docs]class DKN(BaseModel): + """DKN model (Deep Knowledge-Aware Network) + + :Citation: + + H. Wang, F. Zhang, X. Xie and M. Guo, "DKN: Deep Knowledge-Aware Network for News + Recommendation", in Proceedings of the 2018 World Wide Web Conference on World + Wide Web, 2018. + """ + + def __init__(self, hparams, iterator_creator): + """Initialization steps for DKN. + Compared with the BaseModel, DKN requires two different pre-computed embeddings, + i.e. word embedding and entity embedding. + After creating these two embedding variables, BaseModel's `__init__` method will be called. + + Args: + hparams (object): Global hyper-parameters. + iterator_creator (object): DKN data loader class. + """ + self.graph = tf.Graph() + with self.graph.as_default(): + with tf.compat.v1.name_scope("embedding"): + word2vec_embedding = self._init_embedding(hparams.wordEmb_file) + self.embedding = tf.Variable( + word2vec_embedding, trainable=True, name="word" + ) + + if hparams.use_entity: + e_embedding = self._init_embedding(hparams.entityEmb_file) + W = tf.Variable( + tf.random.uniform([hparams.entity_dim, hparams.dim], -1, 1), + trainable=True, + ) + b = tf.Variable(tf.zeros([hparams.dim]), trainable=True) + self.entity_embedding = tf.nn.tanh(tf.matmul(e_embedding, W) + b) + else: + self.entity_embedding = tf.Variable( + tf.constant( + 0.0, + shape=[hparams.entity_size, hparams.dim], + dtype=tf.float32, + ), + trainable=True, + name="entity", + ) + + if hparams.use_context: + c_embedding = self._init_embedding(hparams.contextEmb_file) + W = tf.Variable( + tf.random.uniform([hparams.entity_dim, hparams.dim], -1, 1), + trainable=True, + ) + b = tf.Variable(tf.zeros([hparams.dim]), trainable=True) + self.context_embedding = tf.nn.tanh(tf.matmul(c_embedding, W) + b) + else: + self.context_embedding = tf.Variable( + tf.constant( + 0.0, + shape=[hparams.entity_size, hparams.dim], + dtype=tf.float32, + ), + trainable=True, + name="context", + ) + + super().__init__(hparams, iterator_creator, graph=self.graph) + + def _init_embedding(self, file_path): + """Load pre-trained embeddings as a constant tensor. + + Args: + file_path (str): the pre-trained embeddings filename. + + Returns: + object: A constant tensor. 
+ """ + return tf.constant(np.load(file_path).astype(np.float32)) + + def _l2_loss(self): + hparams = self.hparams + l2_loss = tf.zeros([1], dtype=tf.float32) + # embedding_layer l2 loss + l2_loss = tf.add( + l2_loss, tf.multiply(hparams.embed_l2, tf.nn.l2_loss(self.embedding)) + ) + if hparams.use_entity: + l2_loss = tf.add( + l2_loss, + tf.multiply(hparams.embed_l2, tf.nn.l2_loss(self.entity_embedding)), + ) + if hparams.use_entity and hparams.use_context: + l2_loss = tf.add( + l2_loss, + tf.multiply(hparams.embed_l2, tf.nn.l2_loss(self.context_embedding)), + ) + params = self.layer_params + for param in params: + l2_loss = tf.add( + l2_loss, tf.multiply(hparams.layer_l2, tf.nn.l2_loss(param)) + ) + return l2_loss + + def _l1_loss(self): + hparams = self.hparams + l1_loss = tf.zeros([1], dtype=tf.float32) + # embedding_layer l2 loss + l1_loss = tf.add( + l1_loss, + tf.multiply(hparams.embed_l1, tf.norm(tensor=self.embedding, ord=1)), + ) + if hparams.use_entity: + l1_loss = tf.add( + l1_loss, + tf.multiply( + hparams.embed_l1, tf.norm(tensor=self.entity_embedding, ord=1) + ), + ) + if hparams.use_entity and hparams.use_context: + l1_loss = tf.add( + l1_loss, + tf.multiply( + hparams.embed_l1, tf.norm(tensor=self.context_embedding, ord=1) + ), + ) + params = self.layer_params + for param in params: + l1_loss = tf.add( + l1_loss, tf.multiply(hparams.layer_l1, tf.norm(tensor=param, ord=1)) + ) + return l1_loss + + def _build_graph(self): + hparams = self.hparams + self.keep_prob_train = 1 - np.array(hparams.dropout) + self.keep_prob_test = np.ones_like(hparams.dropout) + with tf.compat.v1.variable_scope("DKN"): + logit = self._build_dkn() + return logit + + def _build_dkn(self): + """The main function to create DKN's logic. + + Returns: + object: Prediction score made by the DKN model. 
+ """ + hparams = self.hparams + # build attention model for clicked news and candidate news + click_news_embed_batch, candidate_news_embed_batch = self._build_pair_attention( + self.iterator.candidate_news_index_batch, + self.iterator.candidate_news_entity_index_batch, + self.iterator.click_news_index_batch, + self.iterator.click_news_entity_index_batch, + hparams, + ) + + nn_input = tf.concat( + [click_news_embed_batch, candidate_news_embed_batch], axis=1 + ) + + dnn_channel_part = 2 + last_layer_size = dnn_channel_part * self.num_filters_total + layer_idx = 0 + hidden_nn_layers = [] + hidden_nn_layers.append(nn_input) + with tf.compat.v1.variable_scope("nn_part", initializer=self.initializer): + for idx, layer_size in enumerate(hparams.layer_sizes): + curr_w_nn_layer = tf.compat.v1.get_variable( + name="w_nn_layer" + str(layer_idx), + shape=[last_layer_size, layer_size], + dtype=tf.float32, + ) + curr_b_nn_layer = tf.compat.v1.get_variable( + name="b_nn_layer" + str(layer_idx), + shape=[layer_size], + dtype=tf.float32, + ) + curr_hidden_nn_layer = tf.compat.v1.nn.xw_plus_b( + hidden_nn_layers[layer_idx], curr_w_nn_layer, curr_b_nn_layer + ) + if hparams.enable_BN is True: + curr_hidden_nn_layer = tf.compat.v1.layers.batch_normalization( + curr_hidden_nn_layer, + momentum=0.95, + epsilon=0.0001, + training=self.is_train_stage, + ) + + activation = hparams.activation[idx] + curr_hidden_nn_layer = self._active_layer( + logit=curr_hidden_nn_layer, activation=activation + ) + hidden_nn_layers.append(curr_hidden_nn_layer) + layer_idx += 1 + last_layer_size = layer_size + self.layer_params.append(curr_w_nn_layer) + self.layer_params.append(curr_b_nn_layer) + + w_nn_output = tf.compat.v1.get_variable( + name="w_nn_output", shape=[last_layer_size, 1], dtype=tf.float32 + ) + b_nn_output = tf.compat.v1.get_variable( + name="b_nn_output", shape=[1], dtype=tf.float32 + ) + self.layer_params.append(w_nn_output) + self.layer_params.append(b_nn_output) + nn_output = tf.compat.v1.nn.xw_plus_b( + hidden_nn_layers[-1], w_nn_output, b_nn_output + ) + return nn_output + + def _build_pair_attention( + self, + candidate_word_batch, + candidate_entity_batch, + click_word_batch, + click_entity_batch, + hparams, + ): + """This function learns the candidate news article's embedding and user embedding. + User embedding is generated from click history and also depends on the candidate news article via attention mechanism. + Article embedding is generated via KCNN module. 
+ Args: + candidate_word_batch (object): tensor word indices for constructing news article + candidate_entity_batch (object): tensor entity values for constructing news article + click_word_batch (object): tensor word indices for constructing user clicked history + click_entity_batch (object): tensor entity indices for constructing user clicked history + hparams (object): global hyper-parameters + Returns: + click_field_embed_final_batch: user embedding + news_field_embed_final_batch: candidate news article embedding + + """ + doc_size = hparams.doc_size + attention_hidden_sizes = hparams.attention_layer_sizes + + clicked_words = tf.reshape(click_word_batch, shape=[-1, doc_size]) + clicked_entities = tf.reshape(click_entity_batch, shape=[-1, doc_size]) + + with tf.compat.v1.variable_scope( + "attention_net", initializer=self.initializer + ) as scope: # noqa: F841 + + # use kims cnn to get conv embedding + with tf.compat.v1.variable_scope( + "kcnn", initializer=self.initializer, reuse=tf.compat.v1.AUTO_REUSE + ) as cnn_scope: # noqa: F841 + news_field_embed = self._kims_cnn( + candidate_word_batch, candidate_entity_batch, hparams + ) + click_field_embed = self._kims_cnn( + clicked_words, clicked_entities, hparams + ) + click_field_embed = tf.reshape( + click_field_embed, + shape=[ + -1, + hparams.history_size, + hparams.num_filters * len(hparams.filter_sizes), + ], + ) + + avg_strategy = False + if avg_strategy: + click_field_embed_final = tf.reduce_mean( + input_tensor=click_field_embed, axis=1, keepdims=True + ) + else: + news_field_embed = tf.expand_dims(news_field_embed, 1) + news_field_embed_repeat = tf.add( + tf.zeros_like(click_field_embed), news_field_embed + ) + attention_x = tf.concat( + axis=-1, values=[click_field_embed, news_field_embed_repeat] + ) + attention_x = tf.reshape( + attention_x, shape=[-1, self.num_filters_total * 2] + ) + attention_w = tf.compat.v1.get_variable( + name="attention_hidden_w", + shape=[self.num_filters_total * 2, attention_hidden_sizes], + dtype=tf.float32, + ) + attention_b = tf.compat.v1.get_variable( + name="attention_hidden_b", + shape=[attention_hidden_sizes], + dtype=tf.float32, + ) + curr_attention_layer = tf.compat.v1.nn.xw_plus_b( + attention_x, attention_w, attention_b + ) + + if hparams.enable_BN is True: + curr_attention_layer = tf.compat.v1.layers.batch_normalization( + curr_attention_layer, + momentum=0.95, + epsilon=0.0001, + training=self.is_train_stage, + ) + + activation = hparams.attention_activation + curr_attention_layer = self._active_layer( + logit=curr_attention_layer, activation=activation + ) + attention_output_w = tf.compat.v1.get_variable( + name="attention_output_w", + shape=[attention_hidden_sizes, 1], + dtype=tf.float32, + ) + attention_output_b = tf.compat.v1.get_variable( + name="attention_output_b", shape=[1], dtype=tf.float32 + ) + attention_weight = tf.compat.v1.nn.xw_plus_b( + curr_attention_layer, attention_output_w, attention_output_b + ) + attention_weight = tf.reshape( + attention_weight, shape=[-1, hparams.history_size, 1] + ) + norm_attention_weight = tf.nn.softmax(attention_weight, axis=1) + click_field_embed_final = tf.reduce_sum( + input_tensor=tf.multiply(click_field_embed, norm_attention_weight), + axis=1, + keepdims=True, + ) + if attention_w not in self.layer_params: + self.layer_params.append(attention_w) + if attention_b not in self.layer_params: + self.layer_params.append(attention_b) + if attention_output_w not in self.layer_params: + self.layer_params.append(attention_output_w) + if 
attention_output_b not in self.layer_params: + self.layer_params.append(attention_output_b) + self.news_field_embed_final_batch = tf.squeeze(news_field_embed) + click_field_embed_final_batch = tf.squeeze(click_field_embed_final) + + return click_field_embed_final_batch, self.news_field_embed_final_batch + + def _kims_cnn(self, word, entity, hparams): + """The KCNN module. KCNN is an extension of traditional CNN that incorporates symbolic knowledge from + a knowledge graph into sentence representation learning. + Args: + word (object): word indices for the sentence. + entity (object): entity indices for the sentence. Entities are aligned with words in the sentence. + hparams (object): global hyper-parameters. + + Returns: + object: Sentence representation. + """ + # kims cnn parameter + filter_sizes = hparams.filter_sizes + num_filters = hparams.num_filters + + dim = hparams.dim + embedded_chars = tf.nn.embedding_lookup(params=self.embedding, ids=word) + if hparams.use_entity and hparams.use_context: + entity_embedded_chars = tf.nn.embedding_lookup( + params=self.entity_embedding, ids=entity + ) + context_embedded_chars = tf.nn.embedding_lookup( + params=self.context_embedding, ids=entity + ) + concat = tf.concat( + [embedded_chars, entity_embedded_chars, context_embedded_chars], axis=-1 + ) + elif hparams.use_entity: + entity_embedded_chars = tf.nn.embedding_lookup( + params=self.entity_embedding, ids=entity + ) + concat = tf.concat([embedded_chars, entity_embedded_chars], axis=-1) + else: + concat = embedded_chars + concat_expanded = tf.expand_dims(concat, -1) + + # Create a convolution + maxpool layer for each filter size + pooled_outputs = [] + for i, filter_size in enumerate(filter_sizes): + with tf.compat.v1.variable_scope( + "conv-maxpool-%s" % filter_size, initializer=self.initializer + ): + # Convolution Layer + if hparams.use_entity and hparams.use_context: + filter_shape = [filter_size, dim * 3, 1, num_filters] + elif hparams.use_entity: + filter_shape = [filter_size, dim * 2, 1, num_filters] + else: + filter_shape = [filter_size, dim, 1, num_filters] + W = tf.compat.v1.get_variable( + name="W" + "_filter_size_" + str(filter_size), + shape=filter_shape, + dtype=tf.float32, + initializer=tf.compat.v1.keras.initializers.VarianceScaling( + scale=1.0, + mode="fan_avg", + distribution=("uniform" if False else "truncated_normal"), + ), + ) + b = tf.compat.v1.get_variable( + name="b" + "_filter_size_" + str(filter_size), + shape=[num_filters], + dtype=tf.float32, + ) + if W not in self.layer_params: + self.layer_params.append(W) + if b not in self.layer_params: + self.layer_params.append(b) + conv = tf.nn.conv2d( + input=concat_expanded, + filters=W, + strides=[1, 1, 1, 1], + padding="VALID", + name="conv", + ) + # Apply nonlinearity + h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu") + # Maxpooling over the outputs + pooled = tf.nn.max_pool2d( + h, + ksize=[1, hparams.doc_size - filter_size + 1, 1, 1], + strides=[1, 1, 1, 1], + padding="VALID", + name="pool", + ) + pooled_outputs.append(pooled) + # Combine all the pooled features + # self.num_filters_total is the kims cnn output dimension + self.num_filters_total = num_filters * len(filter_sizes) + h_pool = tf.concat(pooled_outputs, axis=-1) + h_pool_flat = tf.reshape(h_pool, [-1, self.num_filters_total]) + return h_pool_flat + +
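The KCNN output width is simply filters times filter sizes; a quick worked example (numbers illustrative):

    # With hparams.filter_sizes = [1, 2, 3] and hparams.num_filters = 100:
    #   num_filters_total = 100 * 3 = 300
    #   h_pool_flat has shape (batch, 300), one max-pooled value per filter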
[docs] def infer_embedding(self, sess, feed_dict): + """Infer document embedding in feed_dict with current model. + + Args: + sess (object): The model session object. + feed_dict (dict): Feed values for evaluation. This is a dictionary that maps graph elements to values. + + Returns: + list: News embedding in a batch. + """ + feed_dict[self.layer_keeps] = self.keep_prob_test + feed_dict[self.is_train_stage] = False + return sess.run([self.news_field_embed_final_batch], feed_dict=feed_dict)
+ +
[docs]    def run_get_embedding(self, infile_name, outfile_name):
+        """Infer document embeddings with the current model.
+
+        Args:
+            infile_name (str): Input file name; the format is [Newsid] [w1,w2,w3...] [e1,e2,e3...]
+            outfile_name (str): Output file name; the format is [Newsid] [embedding]
+
+        Returns:
+            object: An instance of self.
+        """
+        load_sess = self.sess
+        with tf.io.gfile.GFile(outfile_name, "w") as wt:
+            for (
+                batch_data_input,
+                newsid_list,
+                data_size,
+            ) in self.iterator.load_infer_data_from_file(infile_name):
+                news_embedding = self.infer_embedding(load_sess, batch_data_input)[0]
+                for i in range(data_size):
+                    wt.write(
+                        newsid_list[i]
+                        + " "
+                        + ",".join(
+                            [
+                                str(embedding_value)
+                                for embedding_value in news_embedding[i]
+                            ]
+                        )
+                        + "\n"
+                    )
+        return self
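A hedged usage sketch (the iterator class name and file paths are assumptions based on the deeprec IO conventions):

    # dkn = DKN(hparams, DKNTextIterator)
    # dkn.run_get_embedding("doc_feature.txt", "doc_embedding.txt")
    # -> each output line: "<Newsid> <comma-separated embedding values>"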
+
\ No newline at end of file
diff --git a/_modules/recommenders/models/deeprec/models/dkn_item2item.html b/_modules/recommenders/models/deeprec/models/dkn_item2item.html
new file mode 100644
index 0000000000..fb90335018
--- /dev/null
+++ b/_modules/recommenders/models/deeprec/models/dkn_item2item.html
@@ -0,0 +1,518 @@
+recommenders.models.deeprec.models.dkn_item2item — Recommenders documentation
Source code for recommenders.models.deeprec.models.dkn_item2item

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import numpy as np
+import tensorflow as tf
+from recommenders.models.deeprec.models.dkn import DKN
+from recommenders.models.deeprec.deeprec_utils import cal_metric
+
+"""
+This new model adapts DKN's structure for item-to-item recommendations.
+The tutorial can be found at: https://github.com/microsoft/recommenders/blob/main/examples/07_tutorials/KDD2020-tutorial/step4_run_dkn_item2item.ipynb
+ """
+
+
+
[docs]class DKNItem2Item(DKN): + """Class for item-to-item recommendations using DKN. + See https://github.com/microsoft/recommenders/blob/main/examples/07_tutorials/KDD2020-tutorial/step4_run_dkn_item2item.ipynb""" + + def _compute_data_loss(self): + logits = self.pred + data_loss = -1 * tf.reduce_sum(input_tensor=tf.math.log(logits[:, 0] + 1e-10)) + return data_loss + + def _build_dkn(self): + """The main function to create DKN's logic. + + Returns: + object: Prediction of item2item relation scores made by the DKN model, in the shape of (`batch_size`, `num_negative` + 1). + """ + news_field_embed_final_batch = self._build_doc_embedding( + self.iterator.candidate_news_index_batch, + self.iterator.candidate_news_entity_index_batch, + ) + + self.news_field_embed_final_batch = tf.math.l2_normalize( + news_field_embed_final_batch, axis=-1, epsilon=1e-12 + ) + + item_embs_train = tf.reshape( + self.news_field_embed_final_batch, + [ + -1, + self.iterator.neg_num + 2, + self.news_field_embed_final_batch.shape[-1], + ], + ) # (B, group, D) + + item_embs_source = item_embs_train[:, 0, :] # get the source item + item_embs_source = tf.expand_dims(item_embs_source, 1) + + item_embs_target = item_embs_train[:, 1:, :] + + item_relation = tf.math.multiply(item_embs_target, item_embs_source) + item_relation = tf.reduce_sum( + input_tensor=item_relation, axis=-1 + ) # (B, neg_num + 1) + + self.pred_logits = item_relation + + return self.pred_logits + + def _get_pred(self, logit, task): + return tf.nn.softmax(logit, axis=-1) + + def _build_doc_embedding(self, candidate_word_batch, candidate_entity_batch): + """ + To make the document embedding be dense, we add one tanh layer on top of the `kims_cnn` module. + """ + with tf.compat.v1.variable_scope("kcnn", initializer=self.initializer): + news_field_embed = self._kims_cnn( + candidate_word_batch, candidate_entity_batch, self.hparams + ) + W = tf.compat.v1.get_variable( + name="W_doc_trans", + shape=(news_field_embed.shape[-1], self.num_filters_total), + dtype=tf.float32, + initializer=tf.compat.v1.keras.initializers.VarianceScaling( + scale=1.0, + mode="fan_avg", + distribution=("uniform" if False else "truncated_normal"), + ), + ) + if W not in self.layer_params: + self.layer_params.append(W) + news_field_embed = tf.tanh(tf.matmul(news_field_embed, W)) + return news_field_embed + +
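Because the embeddings are L2-normalized before the element-wise product and sum, the relation score above is cosine similarity; a numpy sketch with illustrative shapes (neg_num = 5, so each group holds 1 source plus 6 target items):

    import numpy as np

    embs = np.random.rand(8 * 7, 16)                      # 8 groups of neg_num + 2 = 7 items
    embs /= np.linalg.norm(embs, axis=-1, keepdims=True)  # mirrors tf.math.l2_normalize
    groups = embs.reshape(8, 7, 16)
    source, targets = groups[:, :1, :], groups[:, 1:, :]
    scores = (targets * source).sum(-1)                   # (8, 6): one score per target item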
[docs] def eval(self, sess, feed_dict): + """Evaluate the data in `feed_dict` with current model. + + Args: + sess (object): The model session object. + feed_dict (dict): Feed values for evaluation. This is a dictionary that maps graph elements to values. + + Returns: + numpy.ndarray, numpy.ndarray: A tuple with predictions and labels arrays. + """ + feed_dict[self.layer_keeps] = self.keep_prob_test + feed_dict[self.is_train_stage] = False + preds = sess.run(self.pred, feed_dict=feed_dict) + labels = np.zeros_like(preds, dtype=np.int32) + labels[:, 0] = 1 + return (preds, labels)
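The label construction above encodes the convention that the positive always sits in column 0; a tiny illustration:

    # preds  = [[0.7, 0.2, 0.1], [0.5, 0.3, 0.2]]
    # labels = [[1,   0,   0  ], [1,   0,   0  ]]
    # pairwise metrics then score column 0 against the remaining columns per row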
+ +
[docs] def run_eval(self, filename): + """Evaluate the given file and returns some evaluation metrics. + + Args: + filename (str): A file name that will be evaluated. + + Returns: + dict: A dictionary containing evaluation metrics. + """ + load_sess = self.sess + group_preds = [] + group_labels = [] + + for ( + batch_data_input, + newsid_list, + data_size, + ) in self.iterator.load_data_from_file(filename): + if batch_data_input: + step_pred, step_labels = self.eval(load_sess, batch_data_input) + group_preds.extend(step_pred) + group_labels.extend(step_labels) + + res = cal_metric(group_labels, group_preds, self.hparams.pairwise_metrics) + return res
+
\ No newline at end of file
diff --git a/_modules/recommenders/models/deeprec/models/graphrec/lightgcn.html b/_modules/recommenders/models/deeprec/models/graphrec/lightgcn.html
new file mode 100644
index 0000000000..aad87c4fea
--- /dev/null
+++ b/_modules/recommenders/models/deeprec/models/graphrec/lightgcn.html
@@ -0,0 +1,806 @@
+recommenders.models.deeprec.models.graphrec.lightgcn — Recommenders documentation
Source code for recommenders.models.deeprec.models.graphrec.lightgcn

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import tensorflow as tf
+import time
+import os
+import sys
+import numpy as np
+import pandas as pd
+from recommenders.evaluation.python_evaluation import (
+    map_at_k,
+    ndcg_at_k,
+    precision_at_k,
+    recall_at_k,
+)
+from recommenders.utils.python_utils import get_top_k_scored_items
+
+tf.compat.v1.disable_eager_execution()  # need to disable eager in TF2.x
+
+
+
[docs]class LightGCN(object): + """LightGCN model + + :Citation: + + He, Xiangnan, Kuan Deng, Xiang Wang, Yan Li, Yongdong Zhang, and Meng Wang. + "LightGCN: Simplifying and Powering Graph Convolution Network for Recommendation." arXiv + preprint arXiv:2002.02126, 2020. + """ + + def __init__(self, hparams, data, seed=None): + """Initializing the model. Create parameters, placeholders, embeddings and loss function. + + Args: + hparams (HParams): A HParams object, hold the entire set of hyperparameters. + data (object): A recommenders.models.deeprec.DataModel.ImplicitCF object, load and process data. + seed (int): Seed. + + """ + + tf.compat.v1.set_random_seed(seed) + np.random.seed(seed) + + self.data = data + self.epochs = hparams.epochs + self.lr = hparams.learning_rate + self.emb_dim = hparams.embed_size + self.batch_size = hparams.batch_size + self.n_layers = hparams.n_layers + self.decay = hparams.decay + self.eval_epoch = hparams.eval_epoch + self.top_k = hparams.top_k + self.save_model = hparams.save_model + self.save_epoch = hparams.save_epoch + self.metrics = hparams.metrics + self.model_dir = hparams.MODEL_DIR + + metric_options = ["map", "ndcg", "precision", "recall"] + for metric in self.metrics: + if metric not in metric_options: + raise ValueError( + "Wrong metric(s), please select one of this list: {}".format( + metric_options + ) + ) + + self.norm_adj = data.get_norm_adj_mat() + + self.n_users = data.n_users + self.n_items = data.n_items + + self.users = tf.compat.v1.placeholder(tf.int32, shape=(None,)) + self.pos_items = tf.compat.v1.placeholder(tf.int32, shape=(None,)) + self.neg_items = tf.compat.v1.placeholder(tf.int32, shape=(None,)) + + self.weights = self._init_weights() + self.ua_embeddings, self.ia_embeddings = self._create_lightgcn_embed() + + self.u_g_embeddings = tf.nn.embedding_lookup( + params=self.ua_embeddings, ids=self.users + ) + self.pos_i_g_embeddings = tf.nn.embedding_lookup( + params=self.ia_embeddings, ids=self.pos_items + ) + self.neg_i_g_embeddings = tf.nn.embedding_lookup( + params=self.ia_embeddings, ids=self.neg_items + ) + self.u_g_embeddings_pre = tf.nn.embedding_lookup( + params=self.weights["user_embedding"], ids=self.users + ) + self.pos_i_g_embeddings_pre = tf.nn.embedding_lookup( + params=self.weights["item_embedding"], ids=self.pos_items + ) + self.neg_i_g_embeddings_pre = tf.nn.embedding_lookup( + params=self.weights["item_embedding"], ids=self.neg_items + ) + + self.batch_ratings = tf.matmul( + self.u_g_embeddings, + self.pos_i_g_embeddings, + transpose_a=False, + transpose_b=True, + ) + + self.mf_loss, self.emb_loss = self._create_bpr_loss( + self.u_g_embeddings, self.pos_i_g_embeddings, self.neg_i_g_embeddings + ) + self.loss = self.mf_loss + self.emb_loss + + self.opt = tf.compat.v1.train.AdamOptimizer(learning_rate=self.lr).minimize( + self.loss + ) + self.saver = tf.compat.v1.train.Saver(max_to_keep=1) + + gpu_options = tf.compat.v1.GPUOptions(allow_growth=True) + self.sess = tf.compat.v1.Session( + config=tf.compat.v1.ConfigProto(gpu_options=gpu_options) + ) + self.sess.run(tf.compat.v1.global_variables_initializer()) + + def _init_weights(self): + """Initialize user and item embeddings. + + Returns: + dict: With keys `user_embedding` and `item_embedding`, embeddings of all users and items. 
+ + """ + all_weights = dict() + initializer = tf.compat.v1.keras.initializers.VarianceScaling( + scale=1.0, mode="fan_avg", distribution="uniform" + ) + + all_weights["user_embedding"] = tf.Variable( + initializer([self.n_users, self.emb_dim]), name="user_embedding" + ) + all_weights["item_embedding"] = tf.Variable( + initializer([self.n_items, self.emb_dim]), name="item_embedding" + ) + print("Using xavier initialization.") + + return all_weights + + def _create_lightgcn_embed(self): + """Calculate the average embeddings of users and items after every layer of the model. + + Returns: + tf.Tensor, tf.Tensor: Average user embeddings. Average item embeddings. + + """ + A_hat = self._convert_sp_mat_to_sp_tensor(self.norm_adj) + + ego_embeddings = tf.concat( + [self.weights["user_embedding"], self.weights["item_embedding"]], axis=0 + ) + all_embeddings = [ego_embeddings] + + for k in range(0, self.n_layers): + ego_embeddings = tf.sparse.sparse_dense_matmul(A_hat, ego_embeddings) + all_embeddings += [ego_embeddings] + + all_embeddings = tf.stack(all_embeddings, 1) + all_embeddings = tf.reduce_mean( + input_tensor=all_embeddings, axis=1, keepdims=False + ) + u_g_embeddings, i_g_embeddings = tf.split( + all_embeddings, [self.n_users, self.n_items], 0 + ) + return u_g_embeddings, i_g_embeddings + + def _create_bpr_loss(self, users, pos_items, neg_items): + """Calculate BPR loss. + + Args: + users (tf.Tensor): User embeddings to calculate loss. + pos_items (tf.Tensor): Positive item embeddings to calculate loss. + neg_items (tf.Tensor): Negative item embeddings to calculate loss. + + Returns: + tf.Tensor, tf.Tensor: Matrix factorization loss. Embedding regularization loss. + + """ + pos_scores = tf.reduce_sum(input_tensor=tf.multiply(users, pos_items), axis=1) + neg_scores = tf.reduce_sum(input_tensor=tf.multiply(users, neg_items), axis=1) + + regularizer = ( + tf.nn.l2_loss(self.u_g_embeddings_pre) + + tf.nn.l2_loss(self.pos_i_g_embeddings_pre) + + tf.nn.l2_loss(self.neg_i_g_embeddings_pre) + ) + regularizer = regularizer / self.batch_size + mf_loss = tf.reduce_mean( + input_tensor=tf.nn.softplus(-(pos_scores - neg_scores)) + ) + emb_loss = self.decay * regularizer + return mf_loss, emb_loss + + def _convert_sp_mat_to_sp_tensor(self, X): + """Convert a scipy sparse matrix to tf.SparseTensor. + + Returns: + tf.SparseTensor: SparseTensor after conversion. + + """ + coo = X.tocoo().astype(np.float32) + indices = np.mat([coo.row, coo.col]).transpose() + return tf.SparseTensor(indices, coo.data, coo.shape) + +
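`_create_lightgcn_embed` above is the heart of the model: starting from the concatenated user/item matrix E(0), each layer applies the normalized adjacency, E(k+1) = Â E(k), and the final representation is the mean over E(0)..E(K). A self-contained NumPy sketch of that recurrence (the dense toy Â here is purely illustrative; the real code uses a tf.SparseTensor):

import numpy as np

def lightgcn_propagate(norm_adj, ego_embeddings, n_layers):
    """norm_adj: (N, N) normalized adjacency, N = n_users + n_items.
    ego_embeddings: (N, emb_dim) concatenated user/item embeddings E(0)."""
    all_embeddings = [ego_embeddings]
    for _ in range(n_layers):
        ego_embeddings = norm_adj @ ego_embeddings  # E(k+1) = A_hat @ E(k)
        all_embeddings.append(ego_embeddings)
    # final embedding: layer-wise mean, as in the tf.stack / tf.reduce_mean above
    return np.mean(np.stack(all_embeddings, axis=0), axis=0)

rng = np.random.default_rng(0)
adj = rng.random((6, 6))
adj /= adj.sum(axis=1, keepdims=True)  # toy row-normalized graph
out = lightgcn_propagate(adj, rng.normal(size=(6, 4)), n_layers=3)
print(out.shape)  # (6, 4)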
[docs] def fit(self): + """Fit the model on self.data.train. If eval_epoch is not -1, evaluate the model on `self.data.test` + every `eval_epoch` epoch to observe the training status. + + """ + for epoch in range(1, self.epochs + 1): + train_start = time.time() + loss, mf_loss, emb_loss = 0.0, 0.0, 0.0 + n_batch = self.data.train.shape[0] // self.batch_size + 1 + for idx in range(n_batch): + users, pos_items, neg_items = self.data.train_loader(self.batch_size) + _, batch_loss, batch_mf_loss, batch_emb_loss = self.sess.run( + [self.opt, self.loss, self.mf_loss, self.emb_loss], + feed_dict={ + self.users: users, + self.pos_items: pos_items, + self.neg_items: neg_items, + }, + ) + loss += batch_loss / n_batch + mf_loss += batch_mf_loss / n_batch + emb_loss += batch_emb_loss / n_batch + + if np.isnan(loss): + print("ERROR: loss is nan.") + sys.exit() + train_end = time.time() + train_time = train_end - train_start + + if self.save_model and epoch % self.save_epoch == 0: + save_path_str = os.path.join(self.model_dir, "epoch_" + str(epoch)) + if not os.path.exists(save_path_str): + os.makedirs(save_path_str) + checkpoint_path = self.saver.save( # noqa: F841 + sess=self.sess, save_path=save_path_str + ) + print("Save model to path {0}".format(os.path.abspath(save_path_str))) + + if self.eval_epoch == -1 or epoch % self.eval_epoch != 0: + print( + "Epoch %d (train)%.1fs: train loss = %.5f = (mf)%.5f + (embed)%.5f" + % (epoch, train_time, loss, mf_loss, emb_loss) + ) + else: + eval_start = time.time() + ret = self.run_eval() + eval_end = time.time() + eval_time = eval_end - eval_start + + print( + "Epoch %d (train)%.1fs + (eval)%.1fs: train loss = %.5f = (mf)%.5f + (embed)%.5f, %s" + % ( + epoch, + train_time, + eval_time, + loss, + mf_loss, + emb_loss, + ", ".join( + metric + " = %.5f" % (r) + for metric, r in zip(self.metrics, ret) + ), + ) + )
+ +
[docs] def load(self, model_path=None): + """Load an existing model. + + Args: + model_path: Model path. + + Raises: + IOError: if the restore operation failed. + + """ + try: + self.saver.restore(self.sess, model_path) + except Exception: + raise IOError( + "Failed to find any matching files for {0}".format(model_path) + )
+ +
+    def run_eval(self):
+        """Run evaluation on self.data.test.
+
+        Returns:
+            list: Results of all metrics in `self.metrics`, in the same order.
+        """
+        topk_scores = self.recommend_k_items(
+            self.data.test, top_k=self.top_k, use_id=True
+        )
+        ret = []
+        for metric in self.metrics:
+            if metric == "map":
+                ret.append(map_at_k(self.data.test, topk_scores, k=self.top_k))
+            elif metric == "ndcg":
+                ret.append(ndcg_at_k(self.data.test, topk_scores, k=self.top_k))
+            elif metric == "precision":
+                ret.append(precision_at_k(self.data.test, topk_scores, k=self.top_k))
+            elif metric == "recall":
+                ret.append(recall_at_k(self.data.test, topk_scores, k=self.top_k))
+        return ret
+ +
[docs] def score(self, user_ids, remove_seen=True): + """Score all items for test users. + + Args: + user_ids (np.array): Users to test. + remove_seen (bool): Flag to remove items seen in training from recommendation. + + Returns: + numpy.ndarray: Value of interest of all items for the users. + + """ + if any(np.isnan(user_ids)): + raise ValueError( + "LightGCN cannot score users that are not in the training set" + ) + u_batch_size = self.batch_size + n_user_batchs = len(user_ids) // u_batch_size + 1 + test_scores = [] + for u_batch_id in range(n_user_batchs): + start = u_batch_id * u_batch_size + end = (u_batch_id + 1) * u_batch_size + user_batch = user_ids[start:end] + item_batch = range(self.data.n_items) + rate_batch = self.sess.run( + self.batch_ratings, {self.users: user_batch, self.pos_items: item_batch} + ) + test_scores.append(np.array(rate_batch)) + test_scores = np.concatenate(test_scores, axis=0) + if remove_seen: + test_scores += self.data.R.tocsr()[user_ids, :] * -np.inf + return test_scores
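The `remove_seen` line above leans on sparse-matrix semantics: multiplying the CSR interaction matrix by `-np.inf` rescales only its stored entries, so seen items drop to `-inf` while unseen scores are untouched (a dense `0 * -np.inf` would give `nan` instead); `recommend_k_items` later converts the `-inf` entries to NaN and drops them. A toy illustration:

import numpy as np
from scipy import sparse

scores = np.array([[0.9, 0.2, 0.5],
                   [0.1, 0.8, 0.3]])
seen = sparse.csr_matrix([[1, 0, 0],
                          [0, 1, 1]])  # 1 = interaction seen in training

# Scalar multiplication of a sparse matrix touches only stored entries,
# so the implicit zeros stay exactly 0 rather than becoming nan.
masked = scores + (seen * -np.inf).toarray()
print(masked)  # seen items are -inf and can never enter the top k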
+ +
[docs] def recommend_k_items( + self, test, top_k=10, sort_top_k=True, remove_seen=True, use_id=False + ): + """Recommend top K items for all users in the test set. + + Args: + test (pandas.DataFrame): Test data. + top_k (int): Number of top items to recommend. + sort_top_k (bool): Flag to sort top k results. + remove_seen (bool): Flag to remove items seen in training from recommendation. + + Returns: + pandas.DataFrame: Top k recommendation items for each user. + + """ + data = self.data + if not use_id: + user_ids = np.array([data.user2id[x] for x in test[data.col_user].unique()]) + else: + user_ids = np.array(test[data.col_user].unique()) + + test_scores = self.score(user_ids, remove_seen=remove_seen) + + top_items, top_scores = get_top_k_scored_items( + scores=test_scores, top_k=top_k, sort_top_k=sort_top_k + ) + + df = pd.DataFrame( + { + data.col_user: np.repeat( + test[data.col_user].drop_duplicates().values, top_items.shape[1] + ), + data.col_item: top_items.flatten() + if use_id + else [data.id2item[item] for item in top_items.flatten()], + data.col_prediction: top_scores.flatten(), + } + ) + + return df.replace(-np.inf, np.nan).dropna()
+ + def output_embeddings(self, idmapper, n, target, user_file): + embeddings = list(target.eval(session=self.sess)) + with open(user_file, "w") as wt: + for i in range(n): + wt.write( + "{0}\t{1}\n".format( + idmapper[i], " ".join([str(a) for a in embeddings[i]]) + ) + ) + +
[docs] def infer_embedding(self, user_file, item_file): + """Export user and item embeddings to csv files. + + Args: + user_file (str): Path of file to save user embeddings. + item_file (str): Path of file to save item embeddings. + + """ + # create output directories if they do not exist + dirs, _ = os.path.split(user_file) + if not os.path.exists(dirs): + os.makedirs(dirs) + dirs, _ = os.path.split(item_file) + if not os.path.exists(dirs): + os.makedirs(dirs) + + data = self.data + + self.output_embeddings( + data.id2user, self.n_users, self.ua_embeddings, user_file + ) + self.output_embeddings( + data.id2item, self.n_items, self.ia_embeddings, item_file + )
+
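A hypothetical end-to-end sketch of this class, assuming the companion `ImplicitCF` data model and `prepare_hparams` helper from this repository; the DataFrame contents and the `lightgcn.yaml` path are illustrative only:

import pandas as pd
from recommenders.models.deeprec.deeprec_utils import prepare_hparams
from recommenders.models.deeprec.DataModel.ImplicitCF import ImplicitCF
from recommenders.models.deeprec.models.graphrec.lightgcn import LightGCN

# Toy interactions; real runs would use e.g. MovieLens splits.
train = pd.DataFrame({"userID": [1, 1, 2, 2, 3],
                      "itemID": [10, 11, 10, 12, 11],
                      "rating": [1.0, 1.0, 1.0, 1.0, 1.0]})
test = pd.DataFrame({"userID": [1, 2], "itemID": [12, 11], "rating": [1.0, 1.0]})

data = ImplicitCF(train=train, test=test)
hparams = prepare_hparams("lightgcn.yaml",  # hypothetical path to the shipped config
                          n_layers=3, batch_size=2, epochs=5,
                          learning_rate=0.005, eval_epoch=5, top_k=2)
model = LightGCN(hparams, data, seed=42)
model.fit()
print(model.recommend_k_items(test, top_k=2))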
\ No newline at end of file
diff --git a/_modules/recommenders/models/deeprec/models/sequential/asvd.html b/_modules/recommenders/models/deeprec/models/sequential/asvd.html
new file mode 100644
index 0000000000..8fdeb24d3a
--- /dev/null
+++ b/_modules/recommenders/models/deeprec/models/sequential/asvd.html
@@ -0,0 +1,436 @@
+recommenders.models.deeprec.models.sequential.asvd — Recommenders documentation
Source code for recommenders.models.deeprec.models.sequential.asvd

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import tensorflow as tf
+from recommenders.models.deeprec.models.sequential.sequential_base_model import (
+    SequentialBaseModel,
+)
+
+__all__ = ["A2SVDModel"]
+
+
+
+class A2SVDModel(SequentialBaseModel):
+    """A2SVD Model (Attentive Asynchronous Singular Value Decomposition)
+
+    It extends ASVD with an attention module.
+
+    :Citation:
+
+        ASVD: Y. Koren, "Factorization Meets the Neighborhood: a Multifaceted Collaborative
+        Filtering Model", in Proceedings of the 14th ACM SIGKDD international conference on
+        Knowledge discovery and data mining, pages 426–434, ACM, 2008.
+
+        A2SVD: Z. Yu, J. Lian, A. Mahmoody, G. Liu and X. Xie, "Adaptive User Modeling with
+        Long and Short-Term Preferences for Personalized Recommendation", in Proceedings of
+        the 28th International Joint Conference on Artificial Intelligence, IJCAI'19,
+        pages 4213-4219, AAAI Press, 2019.
+    """
+
+    def _build_seq_graph(self):
+        """The main function to create the A2SVD model.
+
+        Returns:
+            object: The output of the A2SVD section.
+        """
+        hparams = self.hparams
+        with tf.compat.v1.variable_scope("a2svd"):
+            hist_input = tf.concat(
+                [self.item_history_embedding, self.cate_history_embedding], 2
+            )
+            with tf.compat.v1.variable_scope("Attention_layer"):
+                att_outputs1 = self._attention(hist_input, hparams.attention_size)
+                asvd_output = tf.reduce_sum(input_tensor=att_outputs1, axis=1)
+                tf.compat.v1.summary.histogram("a2svd_output", asvd_output)
+            model_output = tf.concat([asvd_output, self.target_item_embedding], 1)
+            self.model_output = model_output
+            tf.compat.v1.summary.histogram("model_output", model_output)
+            return model_output
+
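The attention step above compresses the variable-length history into one vector before it is concatenated with the target embedding. A minimal NumPy sketch of such attentive pooling; the dot-product scoring here stands in for the library's `_attention` (defined in the base model), so treat it as illustrative:

import numpy as np

def attentive_pool(hist, query):
    """hist: (T, D) history embeddings; query: (D,) context vector."""
    scores = hist @ query                 # (T,) relevance of each step
    weights = np.exp(scores - scores.max())
    weights = weights / weights.sum()     # softmax attention weights
    return weights @ hist                 # (D,) attention-weighted sum

rng = np.random.default_rng(1)
hist = rng.normal(size=(5, 8))            # item+category history embeddings
pooled = attentive_pool(hist, rng.normal(size=8))
print(pooled.shape)                       # (8,) -> concatenated with the target embedding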
\ No newline at end of file
diff --git a/_modules/recommenders/models/deeprec/models/sequential/caser.html b/_modules/recommenders/models/deeprec/models/sequential/caser.html
new file mode 100644
index 0000000000..ec04790077
--- /dev/null
+++ b/_modules/recommenders/models/deeprec/models/sequential/caser.html
@@ -0,0 +1,497 @@
+recommenders.models.deeprec.models.sequential.caser — Recommenders documentation
Source code for recommenders.models.deeprec.models.sequential.caser

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import tensorflow as tf
+from recommenders.models.deeprec.models.sequential.sequential_base_model import (
+    SequentialBaseModel,
+)
+
+__all__ = ["CaserModel"]
+
+
+
+class CaserModel(SequentialBaseModel):
+    """Caser Model
+
+    :Citation:
+
+        J. Tang and K. Wang, "Personalized top-n sequential recommendation via convolutional
+        sequence embedding", in Proceedings of the Eleventh ACM International Conference on
+        Web Search and Data Mining, ACM, 2018.
+    """
+
+    def __init__(self, hparams, iterator_creator, seed=None):
+        """Initialization of variables for Caser.
+
+        Args:
+            hparams (HParams): A HParams object holding the entire set of hyperparameters.
+            iterator_creator (object): An iterator to load the data.
+        """
+        self.hparams = hparams
+        self.L = hparams.L  # length of the history sequence involved in convolution
+        self.T = hparams.T  # length of the prediction target
+        self.n_v = hparams.n_v  # number of vertical convolution filters
+        self.n_h = hparams.n_h  # number of horizontal convolution filters
+        self.lengths = [
+            i + 1 for i in range(self.L)
+        ]  # heights of the horizontal convolution filters
+        super().__init__(hparams, iterator_creator, seed=seed)
+
+    def _build_seq_graph(self):
+        """The main function to create the Caser model.
+
+        Returns:
+            object: The output of the Caser section.
+        """
+        with tf.compat.v1.variable_scope("caser"):
+            cnn_output = self._caser_cnn()
+            model_output = tf.concat([cnn_output, self.target_item_embedding], 1)
+            tf.compat.v1.summary.histogram("model_output", model_output)
+            return model_output
+
+    def _add_cnn(self, hist_matrix, vertical_dim, scope):
+        """The main function to apply CNNs along both the vertical and horizontal axes.
+
+        Args:
+            hist_matrix (object): The output of the history sequential embeddings.
+            vertical_dim (int): The embedding dimension of the input.
+            scope (object): The scope of the CNN input.
+
+        Returns:
+            object: The output of the CNN layers.
+        """
+        with tf.compat.v1.variable_scope(scope):
+            with tf.compat.v1.variable_scope("vertical"):
+                embedding_T = tf.transpose(a=hist_matrix, perm=[0, 2, 1])
+                out_v = self._build_cnn(embedding_T, self.n_v, vertical_dim)
+                out_v = tf.compat.v1.layers.flatten(out_v)
+            # scope name "horizonal" kept as-is for checkpoint compatibility
+            with tf.compat.v1.variable_scope("horizonal"):
+                out_hs = []
+                for h in self.lengths:
+                    conv_out = self._build_cnn(hist_matrix, self.n_h, h)
+                    max_pool_out = tf.reduce_max(
+                        input_tensor=conv_out, axis=[1], name="max_pool_{0}".format(h)
+                    )
+                    out_hs.append(max_pool_out)
+                out_h = tf.concat(out_hs, 1)
+            return tf.concat([out_v, out_h], 1)
+
+    def _caser_cnn(self):
+        """The main function to apply CNNs on both the item and category aspects.
+
+        Returns:
+            object: The concatenated output of the item and category parts.
+        """
+        item_out = self._add_cnn(
+            self.item_history_embedding, self.item_embedding_dim, "item"
+        )
+        tf.compat.v1.summary.histogram("item_out", item_out)
+        cate_out = self._add_cnn(
+            self.cate_history_embedding, self.cate_embedding_dim, "cate"
+        )
+        tf.compat.v1.summary.histogram("cate_out", cate_out)
+        cnn_output = tf.concat([item_out, cate_out], 1)
+        tf.compat.v1.summary.histogram("cnn_output", cnn_output)
+        return cnn_output
+
+    def _build_cnn(self, history_matrix, nums, shape):
+        """Call a CNN layer.
+
+        Returns:
+            object: The output of the CNN section.
+        """
+        return tf.compat.v1.layers.conv1d(
+            history_matrix,
+            nums,
+            shape,
+            activation=tf.nn.relu,
+            name="conv_" + str(shape),
+        )
+
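To see the two convolution branches concretely: the vertical branch convolves the transposed (embedding x time) matrix with filters spanning the full embedding axis, while the horizontal branch slides filters of every height 1..L over time and max-pools each. A NumPy shape sketch under those assumptions (a naive valid-mode convolution stands in for `tf.compat.v1.layers.conv1d`):

import numpy as np

def conv1d_valid(x, w):
    """x: (T, D) sequence; w: (k, D, F) filters. Returns (T - k + 1, F)."""
    k = w.shape[0]
    return np.stack([np.einsum("kd,kdf->f", x[t:t + k], w)
                     for t in range(x.shape[0] - k + 1)])

rng = np.random.default_rng(2)
T, D, n_v, n_h = 6, 4, 3, 2
hist = rng.normal(size=(T, D))

# Vertical: convolve the transposed (D, T) matrix with full-height filters.
out_v = conv1d_valid(hist.T, rng.normal(size=(D, T, n_v))).flatten()

# Horizontal: one filter height per window size h = 1..T, max-pooled over time.
out_h = np.concatenate([conv1d_valid(hist, rng.normal(size=(h, D, n_h))).max(axis=0)
                        for h in range(1, T + 1)])
print(out_v.shape, out_h.shape)  # the two parts are concatenated into the Caser features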
\ No newline at end of file
diff --git a/_modules/recommenders/models/deeprec/models/sequential/gru.html b/_modules/recommenders/models/deeprec/models/sequential/gru.html
new file mode 100644
index 0000000000..ab9a76913c
--- /dev/null
+++ b/_modules/recommenders/models/deeprec/models/sequential/gru.html
@@ -0,0 +1,469 @@
+recommenders.models.deeprec.models.sequential.gru — Recommenders documentation
Source code for recommenders.models.deeprec.models.sequential.gru

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import tensorflow as tf
+from keras.layers.legacy_rnn.rnn_cell_impl import GRUCell, LSTMCell
+from recommenders.models.deeprec.models.sequential.sequential_base_model import (
+    SequentialBaseModel,
+)
+from tensorflow.compat.v1.nn import dynamic_rnn
+
+__all__ = ["GRUModel"]
+
+
+
+class GRUModel(SequentialBaseModel):
+    """GRU Model
+
+    :Citation:
+
+        Kyunghyun Cho, Bart van Merrienboer, Caglar Gulcehre, Dzmitry Bahdanau,
+        Fethi Bougares, Holger Schwenk, and Yoshua Bengio. Learning Phrase
+        Representations using RNN Encoder-Decoder for Statistical Machine Translation.
+        arXiv preprint arXiv:1406.1078. 2014.
+    """
+
+    def _build_seq_graph(self):
+        """The main function to create the GRU model.
+
+        Returns:
+            object: The output of the GRU section.
+        """
+        with tf.compat.v1.variable_scope("gru"):
+            # final_state = self._build_lstm()
+            final_state = self._build_gru()
+            model_output = tf.concat([final_state, self.target_item_embedding], 1)
+            tf.compat.v1.summary.histogram("model_output", model_output)
+            return model_output
+
+    def _build_lstm(self):
+        """Apply an LSTM for modeling.
+
+        Returns:
+            object: The output of the LSTM section.
+        """
+        with tf.compat.v1.name_scope("lstm"):
+            self.mask = self.iterator.mask
+            self.sequence_length = tf.reduce_sum(input_tensor=self.mask, axis=1)
+            self.history_embedding = tf.concat(
+                [self.item_history_embedding, self.cate_history_embedding], 2
+            )
+            rnn_outputs, final_state = dynamic_rnn(
+                LSTMCell(self.hidden_size),
+                inputs=self.history_embedding,
+                sequence_length=self.sequence_length,
+                dtype=tf.float32,
+                scope="lstm",
+            )
+            tf.compat.v1.summary.histogram("LSTM_outputs", rnn_outputs)
+            # the LSTM state is a (c, h) tuple; return the hidden state h
+            return final_state[1]
+
+    def _build_gru(self):
+        """Apply a GRU for modeling.
+
+        Returns:
+            object: The output of the GRU section.
+        """
+        with tf.compat.v1.name_scope("gru"):
+            self.mask = self.iterator.mask
+            self.sequence_length = tf.reduce_sum(input_tensor=self.mask, axis=1)
+            self.history_embedding = tf.concat(
+                [self.item_history_embedding, self.cate_history_embedding], 2
+            )
+            rnn_outputs, final_state = dynamic_rnn(
+                GRUCell(self.hidden_size),
+                inputs=self.history_embedding,
+                sequence_length=self.sequence_length,
+                dtype=tf.float32,
+                scope="gru",
+            )
+            tf.compat.v1.summary.histogram("GRU_outputs", rnn_outputs)
+            return final_state
+
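The `sequence_length` derived from the mask is what makes `dynamic_rnn` padding-safe: state updates stop at each sequence's true length, so the returned final state equals the output at the last valid step rather than at a padded position. A NumPy sketch of that "last valid step" selection:

import numpy as np

mask = np.array([[1, 1, 1, 0, 0],
                 [1, 1, 1, 1, 1]])              # 1 = real step, 0 = padding
sequence_length = mask.sum(axis=1)              # tf.reduce_sum(mask, axis=1) -> [3, 5]

rnn_outputs = np.arange(2 * 5 * 4).reshape(2, 5, 4).astype(float)  # (B, T, hidden)
# dynamic_rnn's final state equals the output at the last *valid* position:
final_state = rnn_outputs[np.arange(2), sequence_length - 1]        # (B, hidden)
print(final_state[:, 0])                        # taken from steps 2 and 4 respectively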
\ No newline at end of file
diff --git a/_modules/recommenders/models/deeprec/models/sequential/nextitnet.html b/_modules/recommenders/models/deeprec/models/sequential/nextitnet.html
new file mode 100644
index 0000000000..a2330ab55e
--- /dev/null
+++ b/_modules/recommenders/models/deeprec/models/sequential/nextitnet.html
@@ -0,0 +1,627 @@
+recommenders.models.deeprec.models.sequential.nextitnet — Recommenders documentation
Source code for recommenders.models.deeprec.models.sequential.nextitnet

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import tensorflow as tf
+from recommenders.models.deeprec.models.sequential.sequential_base_model import (
+    SequentialBaseModel,
+)
+
+__all__ = ["NextItNetModel"]
+
+
+
+class NextItNetModel(SequentialBaseModel):
+    """NextItNet Model
+
+    :Citation:
+        Yuan, Fajie, et al. "A Simple Convolutional Generative Network
+        for Next Item Recommendation", in Web Search and Data Mining, 2019.
+
+    Note:
+        It requires a dataset with strong sequential patterns.
+    """
+
+    def _build_seq_graph(self):
+        """The main function to create the NextItNet model.
+
+        Returns:
+            object: The output of the NextItNet section.
+        """
+        hparams = self.hparams
+        is_training = tf.equal(self.is_train_stage, True)
+        item_history_embedding = tf.cond(
+            pred=is_training,
+            true_fn=lambda: self.item_history_embedding[
+                :: self.hparams.train_num_ngs + 1
+            ],
+            false_fn=lambda: self.item_history_embedding,
+        )
+        cate_history_embedding = tf.cond(
+            pred=is_training,
+            true_fn=lambda: self.cate_history_embedding[
+                :: self.hparams.train_num_ngs + 1
+            ],
+            false_fn=lambda: self.cate_history_embedding,
+        )
+
+        with tf.compat.v1.variable_scope("nextitnet", reuse=tf.compat.v1.AUTO_REUSE):
+            dilate_input = tf.concat(
+                [item_history_embedding, cate_history_embedding], 2
+            )
+
+            for layer_id, dilation in enumerate(hparams.dilations):
+                dilate_input = tf.cond(
+                    pred=is_training,
+                    true_fn=lambda: self._nextitnet_residual_block_one(
+                        dilate_input,
+                        dilation,
+                        layer_id,
+                        dilate_input.get_shape()[-1],
+                        hparams.kernel_size,
+                        causal=True,
+                        train=True,
+                    ),
+                    false_fn=lambda: self._nextitnet_residual_block_one(
+                        dilate_input,
+                        dilation,
+                        layer_id,
+                        dilate_input.get_shape()[-1],
+                        hparams.kernel_size,
+                        causal=True,
+                        train=False,
+                    ),
+                )
+
+            self.dilate_input = dilate_input
+            model_output = tf.cond(
+                pred=is_training,
+                true_fn=self._training_output,
+                false_fn=self._normal_output,
+            )
+
+            return model_output
+
+    def _training_output(self):
+        model_output = tf.repeat(
+            self.dilate_input, self.hparams.train_num_ngs + 1, axis=0
+        )
+        model_output = tf.concat([model_output, self.target_item_embedding], -1)
+        model_output = tf.reshape(
+            model_output,
+            (
+                -1,
+                self.hparams.train_num_ngs + 1,
+                self.hparams.max_seq_length,
+                model_output.get_shape()[-1],
+            ),
+        )
+        model_output = tf.transpose(a=model_output, perm=[0, 2, 1, 3])
+        model_output = tf.reshape(model_output, (-1, model_output.get_shape()[-1]))
+        return model_output
+
+    def _normal_output(self):
+        model_output = self.dilate_input[:, -1, :]
+        model_output = tf.concat(
+            [model_output, self.target_item_embedding[:, -1, :]], -1
+        )
+        return model_output
+
+    def _nextitnet_residual_block_one(
+        self,
+        input_,
+        dilation,
+        layer_id,
+        residual_channels,
+        kernel_size,
+        causal=True,
+        train=True,
+    ):
+        """The main function to use a dilated CNN and residual network on sequence data.
+
+        Args:
+            input_ (object): The output of the history sequential embeddings.
+            dilation (int): The dilation number of the CNN layer.
+            layer_id (str): String value of the layer ID: 0, 1, 2...
+            residual_channels (int): Embedding size of the input sequence.
+            kernel_size (int): Kernel size of the CNN mask.
+            causal (bool): Whether to pad only in front of the sequence or on both sides.
+            train (bool): Whether we are in the training stage.
+
+        Returns:
+            object: The output of the residual layers.
+        """
+        resblock_type = "decoder"
+        resblock_name = "nextitnet_residual_block_one_{}_layer_{}_{}".format(
+            resblock_type, layer_id, dilation
+        )
+        with tf.compat.v1.variable_scope(resblock_name):
+            input_ln = self._layer_norm(input_, name="layer_norm1", trainable=train)
+            relu1 = tf.nn.relu(input_ln)
+            conv1 = self._conv1d(
+                relu1, int(0.5 * int(residual_channels)), name="conv1d_1"
+            )
+            conv1 = self._layer_norm(conv1, name="layer_norm2", trainable=train)
+            relu2 = tf.nn.relu(conv1)
+
+            dilated_conv = self._conv1d(
+                relu2,
+                int(0.5 * int(residual_channels)),
+                dilation,
+                kernel_size,
+                causal=causal,
+                name="dilated_conv",
+            )
+
+            dilated_conv = self._layer_norm(
+                dilated_conv, name="layer_norm3", trainable=train
+            )
+            relu3 = tf.nn.relu(dilated_conv)
+            conv2 = self._conv1d(relu3, residual_channels, name="conv1d_2")
+            return input_ + conv2
+
+    def _conv1d(
+        self,
+        input_,
+        output_channels,
+        dilation=1,
+        kernel_size=1,
+        causal=False,
+        name="dilated_conv",
+    ):
+        """Call a dilated CNN layer.
+
+        Returns:
+            object: The output of the dilated CNN layers.
+        """
+        with tf.compat.v1.variable_scope(name):
+            weight = tf.compat.v1.get_variable(
+                "weight",
+                [1, kernel_size, input_.get_shape()[-1], output_channels],
+                initializer=tf.compat.v1.truncated_normal_initializer(
+                    stddev=0.02, seed=1
+                ),
+            )
+            bias = tf.compat.v1.get_variable(
+                "bias",
+                [output_channels],
+                initializer=tf.compat.v1.constant_initializer(0.0),
+            )
+
+            if causal:
+                # left-pad so each output position only sees current and past steps
+                padding = [[0, 0], [(kernel_size - 1) * dilation, 0], [0, 0]]
+                padded = tf.pad(tensor=input_, paddings=padding)
+                input_expanded = tf.expand_dims(padded, axis=1)
+                out = (
+                    tf.nn.atrous_conv2d(
+                        input_expanded, weight, rate=dilation, padding="VALID"
+                    )
+                    + bias
+                )
+            else:
+                input_expanded = tf.expand_dims(input_, axis=1)
+                out = (
+                    tf.nn.conv2d(
+                        input=input_expanded,
+                        filters=weight,
+                        strides=[1, 1, 1, 1],
+                        padding="SAME",
+                    )
+                    + bias
+                )
+
+            return tf.squeeze(out, [1])
+
+    def _layer_norm(self, x, name, epsilon=1e-8, trainable=True):
+        """Call a layer normalization.
+
+        Returns:
+            object: Normalized data.
+        """
+        with tf.compat.v1.variable_scope(name):
+            shape = x.get_shape()
+            beta = tf.compat.v1.get_variable(
+                "beta",
+                [int(shape[-1])],
+                initializer=tf.compat.v1.constant_initializer(0),
+                trainable=trainable,
+            )
+            gamma = tf.compat.v1.get_variable(
+                "gamma",
+                [int(shape[-1])],
+                initializer=tf.compat.v1.constant_initializer(1),
+                trainable=trainable,
+            )
+
+            mean, variance = tf.nn.moments(x=x, axes=[len(shape) - 1], keepdims=True)
+
+            x = (x - mean) / tf.sqrt(variance + epsilon)
+
+            return gamma * x + beta
+
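`_conv1d` with `causal=True` left-pads by `(kernel_size - 1) * dilation`, so output position t only depends on inputs at or before t, and stacked dilations grow the receptive field exponentially. A minimal 1-D NumPy sketch of that padding scheme:

import numpy as np

def causal_dilated_conv(x, w, dilation):
    """x: (T,) signal; w: (k,) kernel. Left-pad so output t sees only x[<= t]."""
    k = len(w)
    padded = np.concatenate([np.zeros((k - 1) * dilation), x])
    return np.array([sum(w[i] * padded[t + i * dilation] for i in range(k))
                     for t in range(len(x))])

x = np.arange(1.0, 9.0)      # T = 8
w = np.array([0.5, 0.5])     # kernel_size = 2
for dilation in (1, 2, 4):   # e.g. hparams.dilations
    print(dilation, causal_dilated_conv(x, w, dilation))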
\ No newline at end of file
diff --git a/_modules/recommenders/models/deeprec/models/sequential/rnn_cell_implement.html b/_modules/recommenders/models/deeprec/models/sequential/rnn_cell_implement.html
new file mode 100644
index 0000000000..bb11e04cb1
--- /dev/null
+++ b/_modules/recommenders/models/deeprec/models/sequential/rnn_cell_implement.html
@@ -0,0 +1,1049 @@
+recommenders.models.deeprec.models.sequential.rnn_cell_implement — Recommenders documentation
Source code for recommenders.models.deeprec.models.sequential.rnn_cell_implement

+# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Module implementing RNN Cells.
+
+This module provides a number of basic commonly used RNN cells, such as LSTM
+(Long Short Term Memory) or GRU (Gated Recurrent Unit), and a number of
+operators that allow adding dropouts, projections, or embeddings for inputs.
+Constructing multi-layer cells is supported by the class `MultiRNNCell`, or by
+calling the `rnn` ops several times.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import tensorflow as tf
+from tensorflow.python.ops import array_ops
+from tensorflow.python.ops import clip_ops
+from tensorflow.python.ops import init_ops
+from tensorflow.python.ops import math_ops
+from tensorflow.python.ops import nn_ops
+from tensorflow.python.ops import partitioned_variables
+from tensorflow.python.ops import variable_scope as vs
+from tensorflow.python.platform import tf_logging as logging
+from tensorflow.python.util import nest
+
+from tensorflow.python.ops.rnn_cell_impl import (
+    RNNCell,
+    LSTMStateTuple,
+    _BIAS_VARIABLE_NAME,
+    _WEIGHTS_VARIABLE_NAME,
+)
+
+
+
[docs]class Time4LSTMCell(RNNCell): + def __init__( + self, + num_units, + use_peepholes=False, + cell_clip=None, + initializer=None, + num_proj=None, + proj_clip=None, + num_unit_shards=None, + num_proj_shards=None, + forget_bias=1.0, + state_is_tuple=True, + activation=None, + reuse=None, + ): + super(Time4LSTMCell, self).__init__(_reuse=reuse) + if not state_is_tuple: + logging.warn( + "%s: Using a concatenated state is slower and will soon be " + "deprecated. Use state_is_tuple=True.", + self, + ) + if num_unit_shards is not None or num_proj_shards is not None: + logging.warn( + "%s: The num_unit_shards and proj_unit_shards parameters are " + "deprecated and will be removed in Jan 2017. " + "Use a variable scope with a partitioner instead.", + self, + ) + + self._num_units = num_units + self._use_peepholes = use_peepholes + self._cell_clip = cell_clip + self._initializer = initializer + self._num_proj = num_proj + self._proj_clip = proj_clip + self._num_unit_shards = num_unit_shards + self._num_proj_shards = num_proj_shards + self._forget_bias = forget_bias + self._state_is_tuple = state_is_tuple + self._activation = activation or math_ops.tanh + + if num_proj: + self._state_size = ( + LSTMStateTuple(num_units, num_proj) + if state_is_tuple + else num_units + num_proj + ) + self._output_size = num_proj + else: + self._state_size = ( + LSTMStateTuple(num_units, num_units) + if state_is_tuple + else 2 * num_units + ) + self._output_size = num_units + self._linear1 = None + self._linear2 = None + self._time_input_w1 = None + self._time_input_w2 = None + self._time_kernel_w1 = None + self._time_kernel_t1 = None + self._time_bias1 = None + self._time_kernel_w2 = None + self._time_kernel_t2 = None + self._time_bias2 = None + self._o_kernel_t1 = None + self._o_kernel_t2 = None + if self._use_peepholes: + self._w_f_diag = None + self._w_i_diag = None + self._w_o_diag = None + + @property + def state_size(self): + return self._state_size + + @property + def output_size(self): + return self._output_size + +
[docs] def call(self, inputs, state): + """Call method for the Time4LSTMCell. + + Args: + inputs: A 2D Tensor of shape [batch_size, input_size]. + state: A 2D Tensor of shape [batch_size, state_size]. + + Returns: + A tuple containing: + - A 2D Tensor of shape [batch_size, output_size]. + - A 2D Tensor of shape [batch_size, state_size]. + """ + time_now_score = tf.expand_dims(inputs[:, -1], -1) + time_last_score = tf.expand_dims(inputs[:, -2], -1) + inputs = inputs[:, :-2] + num_proj = self._num_units if self._num_proj is None else self._num_proj + sigmoid = math_ops.sigmoid + + if self._state_is_tuple: + (c_prev, m_prev) = state + else: + c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units]) + m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj]) + + dtype = inputs.dtype + input_size = inputs.get_shape().with_rank(2)[1] + if input_size is None: + raise ValueError("Could not infer input size from inputs.get_shape()[-1]") + + if self._time_kernel_w1 is None: + scope = vs.get_variable_scope() + with vs.variable_scope(scope, initializer=self._initializer) as unit_scope: + with vs.variable_scope(unit_scope): + self._time_input_w1 = vs.get_variable( + "_time_input_w1", shape=[self._num_units], dtype=dtype + ) + self._time_input_bias1 = vs.get_variable( + "_time_input_bias1", shape=[self._num_units], dtype=dtype + ) + self._time_input_w2 = vs.get_variable( + "_time_input_w2", shape=[self._num_units], dtype=dtype + ) + self._time_input_bias2 = vs.get_variable( + "_time_input_bias2", shape=[self._num_units], dtype=dtype + ) + self._time_kernel_w1 = vs.get_variable( + "_time_kernel_w1", + shape=[input_size, self._num_units], + dtype=dtype, + ) + self._time_kernel_t1 = vs.get_variable( + "_time_kernel_t1", + shape=[self._num_units, self._num_units], + dtype=dtype, + ) + self._time_bias1 = vs.get_variable( + "_time_bias1", shape=[self._num_units], dtype=dtype + ) + self._time_kernel_w2 = vs.get_variable( + "_time_kernel_w2", + shape=[input_size, self._num_units], + dtype=dtype, + ) + self._time_kernel_t2 = vs.get_variable( + "_time_kernel_t2", + shape=[self._num_units, self._num_units], + dtype=dtype, + ) + self._time_bias2 = vs.get_variable( + "_time_bias2", shape=[self._num_units], dtype=dtype + ) + self._o_kernel_t1 = vs.get_variable( + "_o_kernel_t1", + shape=[self._num_units, self._num_units], + dtype=dtype, + ) + self._o_kernel_t2 = vs.get_variable( + "_o_kernel_t2", + shape=[self._num_units, self._num_units], + dtype=dtype, + ) + + time_now_input = tf.nn.tanh( + time_now_score * self._time_input_w1 + self._time_input_bias1 + ) + time_last_input = tf.nn.tanh( + time_last_score * self._time_input_w2 + self._time_input_bias2 + ) + + time_now_state = ( + math_ops.matmul(inputs, self._time_kernel_w1) + + math_ops.matmul(time_now_input, self._time_kernel_t1) + + self._time_bias1 + ) + time_last_state = ( + math_ops.matmul(inputs, self._time_kernel_w2) + + math_ops.matmul(time_last_input, self._time_kernel_t2) + + self._time_bias2 + ) + + if self._linear1 is None: + scope = vs.get_variable_scope() + with vs.variable_scope(scope, initializer=self._initializer) as unit_scope: + if self._num_unit_shards is not None: + unit_scope.set_partitioner( + partitioned_variables.fixed_size_partitioner( + self._num_unit_shards + ) + ) + self._linear1 = _Linear([inputs, m_prev], 4 * self._num_units, True) + + # i = input_gate, j = new_input, f = forget_gate, o = output_gate + lstm_matrix = self._linear1([inputs, m_prev]) + i, j, f, o = array_ops.split(value=lstm_matrix, 
num_or_size_splits=4, axis=1) + o = ( + o + + math_ops.matmul(time_now_input, self._o_kernel_t1) + + math_ops.matmul(time_last_input, self._o_kernel_t2) + ) + # Diagonal connections + if self._use_peepholes and not self._w_f_diag: + scope = vs.get_variable_scope() + with vs.variable_scope(scope, initializer=self._initializer) as unit_scope: + with vs.variable_scope(unit_scope): + self._w_f_diag = vs.get_variable( + "w_f_diag", shape=[self._num_units], dtype=dtype + ) + self._w_i_diag = vs.get_variable( + "w_i_diag", shape=[self._num_units], dtype=dtype + ) + self._w_o_diag = vs.get_variable( + "w_o_diag", shape=[self._num_units], dtype=dtype + ) + + if self._use_peepholes: + c = sigmoid(f + self._forget_bias + self._w_f_diag * c_prev) * sigmoid( + time_last_state + ) * c_prev + sigmoid(i + self._w_i_diag * c_prev) * sigmoid( + time_now_state + ) * self._activation( + j + ) + else: + c = sigmoid(f + self._forget_bias) * sigmoid( + time_last_state + ) * c_prev + sigmoid(i) * sigmoid(time_now_state) * self._activation(j) + + if self._cell_clip is not None: + # pylint: disable=invalid-unary-operand-type + c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip) + # pylint: enable=invalid-unary-operand-type + if self._use_peepholes: + m = sigmoid(o + self._w_o_diag * c) * self._activation(c) + else: + m = sigmoid(o) * self._activation(c) + + if self._num_proj is not None: + if self._linear2 is None: + scope = vs.get_variable_scope() + with vs.variable_scope(scope, initializer=self._initializer): + with vs.variable_scope("projection") as proj_scope: + if self._num_proj_shards is not None: + proj_scope.set_partitioner( + partitioned_variables.fixed_size_partitioner( + self._num_proj_shards + ) + ) + self._linear2 = _Linear(m, self._num_proj, False) + m = self._linear2(m) + + if self._proj_clip is not None: + # pylint: disable=invalid-unary-operand-type + m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip) + # pylint: enable=invalid-unary-operand-type + + new_state = ( + LSTMStateTuple(c, m) + if self._state_is_tuple + else array_ops.concat([c, m], 1) + ) + return m, new_state
+ + +
[docs]class Time4ALSTMCell(RNNCell): + def __init__( + self, + num_units, + use_peepholes=False, + cell_clip=None, + initializer=None, + num_proj=None, + proj_clip=None, + num_unit_shards=None, + num_proj_shards=None, + forget_bias=1.0, + state_is_tuple=True, + activation=None, + reuse=None, + ): + super(Time4ALSTMCell, self).__init__(_reuse=reuse) + if not state_is_tuple: + logging.warn( + "%s: Using a concatenated state is slower and will soon be " + "deprecated. Use state_is_tuple=True.", + self, + ) + if num_unit_shards is not None or num_proj_shards is not None: + logging.warn( + "%s: The num_unit_shards and proj_unit_shards parameters are " + "deprecated and will be removed in Jan 2017. " + "Use a variable scope with a partitioner instead.", + self, + ) + + self._num_units = num_units + self._use_peepholes = use_peepholes + self._cell_clip = cell_clip + self._initializer = initializer + self._num_proj = num_proj + self._proj_clip = proj_clip + self._num_unit_shards = num_unit_shards + self._num_proj_shards = num_proj_shards + self._forget_bias = forget_bias + self._state_is_tuple = state_is_tuple + self._activation = activation or math_ops.tanh + + if num_proj: + self._state_size = ( + LSTMStateTuple(num_units, num_proj) + if state_is_tuple + else num_units + num_proj + ) + self._output_size = num_proj + else: + self._state_size = ( + LSTMStateTuple(num_units, num_units) + if state_is_tuple + else 2 * num_units + ) + self._output_size = num_units + self._linear1 = None + self._linear2 = None + self._time_input_w1 = None + self._time_input_w2 = None + self._time_kernel_w1 = None + self._time_kernel_t1 = None + self._time_bias1 = None + self._time_kernel_w2 = None + self._time_kernel_t2 = None + self._time_bias2 = None + self._o_kernel_t1 = None + self._o_kernel_t2 = None + if self._use_peepholes: + self._w_f_diag = None + self._w_i_diag = None + self._w_o_diag = None + + @property + def state_size(self): + return self._state_size + + @property + def output_size(self): + return self._output_size + +
[docs] def call(self, inputs, state): + """Call method for the Time4ALSTMCell. + + Args: + inputs: A 2D Tensor of shape [batch_size, input_size]. + state: A 2D Tensor of shape [batch_size, state_size]. + + Returns: + A tuple containing: + - A 2D Tensor of shape [batch_size, output_size]. + - A 2D Tensor of shape [batch_size, state_size]. + """ + att_score = tf.expand_dims(inputs[:, -1], -1) + time_now_score = tf.expand_dims(inputs[:, -2], -1) + time_last_score = tf.expand_dims(inputs[:, -3], -1) + inputs = inputs[:, :-3] + num_proj = self._num_units if self._num_proj is None else self._num_proj + sigmoid = math_ops.sigmoid + + if self._state_is_tuple: + (c_prev, m_prev) = state + else: + c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units]) + m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj]) + + dtype = inputs.dtype + input_size = inputs.get_shape().with_rank(2)[1] + if input_size is None: + raise ValueError("Could not infer input size from inputs.get_shape()[-1]") + + if self._time_kernel_w1 is None: + scope = vs.get_variable_scope() + with vs.variable_scope(scope, initializer=self._initializer) as unit_scope: + with vs.variable_scope(unit_scope): + self._time_input_w1 = vs.get_variable( + "_time_input_w1", shape=[self._num_units], dtype=dtype + ) + self._time_input_bias1 = vs.get_variable( + "_time_input_bias1", shape=[self._num_units], dtype=dtype + ) + self._time_input_w2 = vs.get_variable( + "_time_input_w2", shape=[self._num_units], dtype=dtype + ) + self._time_input_bias2 = vs.get_variable( + "_time_input_bias2", shape=[self._num_units], dtype=dtype + ) + self._time_kernel_w1 = vs.get_variable( + "_time_kernel_w1", + shape=[input_size, self._num_units], + dtype=dtype, + ) + self._time_kernel_t1 = vs.get_variable( + "_time_kernel_t1", + shape=[self._num_units, self._num_units], + dtype=dtype, + ) + self._time_bias1 = vs.get_variable( + "_time_bias1", shape=[self._num_units], dtype=dtype + ) + self._time_kernel_w2 = vs.get_variable( + "_time_kernel_w2", + shape=[input_size, self._num_units], + dtype=dtype, + ) + self._time_kernel_t2 = vs.get_variable( + "_time_kernel_t2", + shape=[self._num_units, self._num_units], + dtype=dtype, + ) + self._time_bias2 = vs.get_variable( + "_time_bias2", shape=[self._num_units], dtype=dtype + ) + self._o_kernel_t1 = vs.get_variable( + "_o_kernel_t1", + shape=[self._num_units, self._num_units], + dtype=dtype, + ) + self._o_kernel_t2 = vs.get_variable( + "_o_kernel_t2", + shape=[self._num_units, self._num_units], + dtype=dtype, + ) + + time_now_input = tf.nn.tanh( + time_now_score * self._time_input_w1 + self._time_input_bias1 + ) + time_last_input = tf.nn.tanh( + time_last_score * self._time_input_w2 + self._time_input_bias2 + ) + + time_now_state = ( + math_ops.matmul(inputs, self._time_kernel_w1) + + math_ops.matmul(time_now_input, self._time_kernel_t1) + + self._time_bias1 + ) + time_last_state = ( + math_ops.matmul(inputs, self._time_kernel_w2) + + math_ops.matmul(time_last_input, self._time_kernel_t2) + + self._time_bias2 + ) + + if self._linear1 is None: + scope = vs.get_variable_scope() + with vs.variable_scope(scope, initializer=self._initializer) as unit_scope: + if self._num_unit_shards is not None: + unit_scope.set_partitioner( + partitioned_variables.fixed_size_partitioner( + self._num_unit_shards + ) + ) + self._linear1 = _Linear([inputs, m_prev], 4 * self._num_units, True) + + # i = input_gate, j = new_input, f = forget_gate, o = output_gate + lstm_matrix = self._linear1([inputs, m_prev]) + i, j, f, o = 
array_ops.split(value=lstm_matrix, num_or_size_splits=4, axis=1) + o = ( + o + + math_ops.matmul(time_now_input, self._o_kernel_t1) + + math_ops.matmul(time_last_input, self._o_kernel_t2) + ) + # Diagonal connections + if self._use_peepholes and not self._w_f_diag: + scope = vs.get_variable_scope() + with vs.variable_scope(scope, initializer=self._initializer) as unit_scope: + with vs.variable_scope(unit_scope): + self._w_f_diag = vs.get_variable( + "w_f_diag", shape=[self._num_units], dtype=dtype + ) + self._w_i_diag = vs.get_variable( + "w_i_diag", shape=[self._num_units], dtype=dtype + ) + self._w_o_diag = vs.get_variable( + "w_o_diag", shape=[self._num_units], dtype=dtype + ) + + if self._use_peepholes: + c = sigmoid(f + self._forget_bias + self._w_f_diag * c_prev) * sigmoid( + time_last_state + ) * c_prev + sigmoid(i + self._w_i_diag * c_prev) * sigmoid( + time_now_state + ) * self._activation( + j + ) + else: + c = sigmoid(f + self._forget_bias) * sigmoid( + time_last_state + ) * c_prev + sigmoid(i) * sigmoid(time_now_state) * self._activation(j) + + if self._cell_clip is not None: + # pylint: disable=invalid-unary-operand-type + c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip) + # pylint: enable=invalid-unary-operand-type + if self._use_peepholes: + m = sigmoid(o + self._w_o_diag * c) * self._activation(c) + else: + m = sigmoid(o) * self._activation(c) + + if self._num_proj is not None: + if self._linear2 is None: + scope = vs.get_variable_scope() + with vs.variable_scope(scope, initializer=self._initializer): + with vs.variable_scope("projection") as proj_scope: + if self._num_proj_shards is not None: + proj_scope.set_partitioner( + partitioned_variables.fixed_size_partitioner( + self._num_proj_shards + ) + ) + self._linear2 = _Linear(m, self._num_proj, False) + m = self._linear2(m) + + if self._proj_clip is not None: + # pylint: disable=invalid-unary-operand-type + m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip) + # pylint: enable=invalid-unary-operand-type + c = att_score * c + (1.0 - att_score) * c + m = att_score * m + (1.0 - att_score) * m + new_state = ( + LSTMStateTuple(c, m) + if self._state_is_tuple + else array_ops.concat([c, m], 1) + ) + return m, new_state
+ + +class _Linear(object): + """Linear map: sum_i(args[i] * W[i]), where W[i] is a variable. + + Args: + args: a 2D Tensor or a list of 2D, batch x n, Tensors. + output_size: int, second dimension of weight variable. + dtype: data type for variables. + build_bias: boolean, whether to build a bias variable. + bias_initializer: starting value to initialize the bias + (default is all zeros). + kernel_initializer: starting value to initialize the weight. + + Raises: + ValueError: if inputs_shape is wrong. + """ + + def __init__( + self, + args, + output_size, + build_bias, + bias_initializer=None, + kernel_initializer=None, + ): + self._build_bias = build_bias + + if args is None or (nest.is_sequence(args) and not args): + raise ValueError("`args` must be specified") + if not nest.is_sequence(args): + args = [args] + self._is_sequence = False + else: + self._is_sequence = True + + # Calculate the total size of arguments on dimension 1. + total_arg_size = 0 + shapes = [a.get_shape() for a in args] + for shape in shapes: + if shape.ndims != 2: + raise ValueError("linear is expecting 2D arguments: %s" % shapes) + if shape[1] is None: + raise ValueError( + "linear expects shape[1] to be provided for shape %s, " + "but saw %s" % (shape, shape[1]) + ) + else: + total_arg_size += shape[1] + + dtype = [a.dtype for a in args][0] + + scope = vs.get_variable_scope() + with vs.variable_scope(scope) as outer_scope: + self._weights = vs.get_variable( + _WEIGHTS_VARIABLE_NAME, + [total_arg_size, output_size], + dtype=dtype, + initializer=kernel_initializer, + ) + if build_bias: + with vs.variable_scope(outer_scope) as inner_scope: + inner_scope.set_partitioner(None) + if bias_initializer is None: + bias_initializer = init_ops.constant_initializer( + 0.0, dtype=dtype + ) + self._biases = vs.get_variable( + _BIAS_VARIABLE_NAME, + [output_size], + dtype=dtype, + initializer=bias_initializer, + ) + + def __call__(self, args): + if not self._is_sequence: + args = [args] + + if len(args) == 1: + res = math_ops.matmul(args[0], self._weights) + else: + res = math_ops.matmul(array_ops.concat(args, 1), self._weights) + if self._build_bias: + res = nn_ops.bias_add(res, self._biases) + return res +
\ No newline at end of file
diff --git a/_modules/recommenders/models/deeprec/models/sequential/sequential_base_model.html b/_modules/recommenders/models/deeprec/models/sequential/sequential_base_model.html
new file mode 100644
index 0000000000..d3ccf5daf2
--- /dev/null
+++ b/_modules/recommenders/models/deeprec/models/sequential/sequential_base_model.html
@@ -0,0 +1,736 @@
+recommenders.models.deeprec.models.sequential.sequential_base_model — Recommenders documentation
Source code for recommenders.models.deeprec.models.sequential.sequential_base_model

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+
+import os
+import abc
+import numpy as np
+import tensorflow as tf
+
+from recommenders.models.deeprec.models.base_model import BaseModel
+from recommenders.models.deeprec.deeprec_utils import cal_metric, load_dict
+
+
+__all__ = ["SequentialBaseModel"]
+
+
+
[docs]class SequentialBaseModel(BaseModel): + """Base class for sequential models""" + + def __init__(self, hparams, iterator_creator, graph=None, seed=None): + """Initializing the model. Create common logics which are needed by all sequential models, such as loss function, + parameter set. + + Args: + hparams (HParams): A `HParams` object, hold the entire set of hyperparameters. + iterator_creator (object): An iterator to load the data. + graph (object): An optional graph. + seed (int): Random seed. + """ + self.hparams = hparams + + self.need_sample = hparams.need_sample + self.train_num_ngs = hparams.train_num_ngs + if self.train_num_ngs is None: + raise ValueError( + "Please confirm the number of negative samples for each positive instance." + ) + self.min_seq_length = ( + hparams.min_seq_length if "min_seq_length" in hparams.values() else 1 + ) + self.hidden_size = ( + hparams.hidden_size if "hidden_size" in hparams.values() else None + ) + self.graph = tf.Graph() if not graph else graph + + with self.graph.as_default(): + self.sequence_length = tf.compat.v1.placeholder( + tf.int32, [None], name="sequence_length" + ) + + super().__init__(hparams, iterator_creator, graph=self.graph, seed=seed) + + @abc.abstractmethod + def _build_seq_graph(self): + """Subclass will implement this.""" + pass + + def _build_graph(self): + """The main function to create sequential models. + + Returns: + object: the prediction score make by the model. + """ + hparams = self.hparams + self.keep_prob_train = 1 - np.array(hparams.dropout) + self.keep_prob_test = np.ones_like(hparams.dropout) + + with tf.compat.v1.variable_scope("sequential") as self.sequential_scope: + self._build_embedding() + self._lookup_from_embedding() + model_output = self._build_seq_graph() + logit = self._fcn_net(model_output, hparams.layer_sizes, scope="logit_fcn") + self._add_norm() + return logit + +
[docs] def fit( + self, + train_file, + valid_file, + valid_num_ngs, + eval_metric="group_auc", + ): + """Fit the model with `train_file`. Evaluate the model on `valid_file` per epoch to observe the training status. + If `test_file` is not None, evaluate it too. + + Args: + train_file (str): training data set. + valid_file (str): validation set. + valid_num_ngs (int): the number of negative instances with one positive instance in validation data. + eval_metric (str): the metric that control early stopping. e.g. "auc", "group_auc", etc. + + Returns: + object: An instance of self. + """ + + # check bad input. + if not self.need_sample and self.train_num_ngs < 1: + raise ValueError( + "Please specify a positive integer of negative numbers for training without sampling needed." + ) + if valid_num_ngs < 1: + raise ValueError( + "Please specify a positive integer of negative numbers for validation." + ) + + if self.need_sample and self.train_num_ngs < 1: + self.train_num_ngs = 1 + + if self.hparams.write_tfevents and self.hparams.SUMMARIES_DIR: + if not os.path.exists(self.hparams.SUMMARIES_DIR): + os.makedirs(self.hparams.SUMMARIES_DIR) + + self.writer = tf.compat.v1.summary.FileWriter( + self.hparams.SUMMARIES_DIR, self.sess.graph + ) + + train_sess = self.sess + eval_info = list() + + best_metric, self.best_epoch = 0, 0 + + for epoch in range(1, self.hparams.epochs + 1): + step = 0 + self.hparams.current_epoch = epoch + epoch_loss = 0 + file_iterator = self.iterator.load_data_from_file( + train_file, + min_seq_length=self.min_seq_length, + batch_num_ngs=self.train_num_ngs, + ) + + for batch_data_input in file_iterator: + if batch_data_input: + step_result = self.train(train_sess, batch_data_input) + (_, _, step_loss, step_data_loss, summary) = step_result + if self.hparams.write_tfevents and self.hparams.SUMMARIES_DIR: + self.writer.add_summary(summary, step) + epoch_loss += step_loss + step += 1 + if step % self.hparams.show_step == 0: + print( + "step {0:d} , total_loss: {1:.4f}, data_loss: {2:.4f}".format( + step, step_loss, step_data_loss + ) + ) + + valid_res = self.run_eval(valid_file, valid_num_ngs) + print( + "eval valid at epoch {0}: {1}".format( + epoch, + ",".join( + [ + "" + str(key) + ":" + str(value) + for key, value in valid_res.items() + ] + ), + ) + ) + eval_info.append((epoch, valid_res)) + + progress = False + early_stop = self.hparams.EARLY_STOP + if valid_res[eval_metric] > best_metric: + best_metric = valid_res[eval_metric] + self.best_epoch = epoch + progress = True + else: + if early_stop > 0 and epoch - self.best_epoch >= early_stop: + print("early stop at epoch {0}!".format(epoch)) + break + + if self.hparams.save_model and self.hparams.MODEL_DIR: + if not os.path.exists(self.hparams.MODEL_DIR): + os.makedirs(self.hparams.MODEL_DIR) + if progress: + checkpoint_path = self.saver.save( + sess=train_sess, + save_path=self.hparams.MODEL_DIR + "epoch_" + str(epoch), + ) + checkpoint_path = self.saver.save( # noqa: F841 + sess=train_sess, + save_path=os.path.join(self.hparams.MODEL_DIR, "best_model"), + ) + + if self.hparams.write_tfevents: + self.writer.close() + + print(eval_info) + print("best epoch: {0}".format(self.best_epoch)) + return self
+ +
+    def run_eval(self, filename, num_ngs):
+        """Evaluate the given file and return some evaluation metrics.
+
+        Args:
+            filename (str): A file name that will be evaluated.
+            num_ngs (int): The number of negative samples for a positive instance.
+
+        Returns:
+            dict: A dictionary that contains evaluation metrics.
+        """
+
+        load_sess = self.sess
+        preds = []
+        labels = []
+        group_preds = []
+        group_labels = []
+        group = num_ngs + 1
+
+        for batch_data_input in self.iterator.load_data_from_file(
+            filename, min_seq_length=self.min_seq_length, batch_num_ngs=0
+        ):
+            if batch_data_input:
+                step_pred, step_labels = self.eval(load_sess, batch_data_input)
+                preds.extend(np.reshape(step_pred, -1))
+                labels.extend(np.reshape(step_labels, -1))
+                group_preds.extend(np.reshape(step_pred, (-1, group)))
+                group_labels.extend(np.reshape(step_labels, (-1, group)))
+
+        res = cal_metric(labels, preds, self.hparams.metrics)
+        res_pairwise = cal_metric(
+            group_labels, group_preds, self.hparams.pairwise_metrics
+        )
+        res.update(res_pairwise)
+        return res
+
+
[docs] def predict(self, infile_name, outfile_name): + """Make predictions on the given data, and output predicted scores to a file. + + Args: + infile_name (str): Input file name. + outfile_name (str): Output file name. + + Returns: + object: An instance of self. + """ + + load_sess = self.sess + with tf.io.gfile.GFile(outfile_name, "w") as wt: + for batch_data_input in self.iterator.load_data_from_file( + infile_name, batch_num_ngs=0 + ): + if batch_data_input: + step_pred = self.infer(load_sess, batch_data_input) + step_pred = np.reshape(step_pred, -1) + wt.write("\n".join(map(str, step_pred))) + wt.write("\n") + return self
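Continuing the sketch above, evaluation and raw scoring over a held-out file; the paths and the `num_ngs` value are again hypothetical:

metrics = model.run_eval("test.tsv", num_ngs=9)   # e.g. {'auc': ..., 'group_auc': ...}
print(metrics)
model.predict("test.tsv", "output_scores.txt")    # writes one score per line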
+ + def _build_embedding(self): + """The field embedding layer. Initialization of embedding variables.""" + hparams = self.hparams + self.user_vocab_length = len(load_dict(hparams.user_vocab)) + self.item_vocab_length = len(load_dict(hparams.item_vocab)) + self.cate_vocab_length = len(load_dict(hparams.cate_vocab)) + self.user_embedding_dim = hparams.user_embedding_dim + self.item_embedding_dim = hparams.item_embedding_dim + self.cate_embedding_dim = hparams.cate_embedding_dim + + with tf.compat.v1.variable_scope("embedding", initializer=self.initializer): + self.user_lookup = tf.compat.v1.get_variable( + name="user_embedding", + shape=[self.user_vocab_length, self.user_embedding_dim], + dtype=tf.float32, + ) + self.item_lookup = tf.compat.v1.get_variable( + name="item_embedding", + shape=[self.item_vocab_length, self.item_embedding_dim], + dtype=tf.float32, + ) + self.cate_lookup = tf.compat.v1.get_variable( + name="cate_embedding", + shape=[self.cate_vocab_length, self.cate_embedding_dim], + dtype=tf.float32, + ) + + def _lookup_from_embedding(self): + """Lookup from embedding variables. A dropout layer follows lookup operations.""" + self.user_embedding = tf.nn.embedding_lookup( + params=self.user_lookup, ids=self.iterator.users + ) + tf.compat.v1.summary.histogram("user_embedding_output", self.user_embedding) + + self.item_embedding = tf.compat.v1.nn.embedding_lookup( + params=self.item_lookup, ids=self.iterator.items + ) + self.item_history_embedding = tf.compat.v1.nn.embedding_lookup( + params=self.item_lookup, ids=self.iterator.item_history + ) + tf.compat.v1.summary.histogram( + "item_history_embedding_output", self.item_history_embedding + ) + + self.cate_embedding = tf.compat.v1.nn.embedding_lookup( + params=self.cate_lookup, ids=self.iterator.cates + ) + self.cate_history_embedding = tf.compat.v1.nn.embedding_lookup( + params=self.cate_lookup, ids=self.iterator.item_cate_history + ) + tf.compat.v1.summary.histogram( + "cate_history_embedding_output", self.cate_history_embedding + ) + + involved_items = tf.concat( + [ + tf.reshape(self.iterator.item_history, [-1]), + tf.reshape(self.iterator.items, [-1]), + ], + -1, + ) + self.involved_items, _ = tf.unique(involved_items) + involved_item_embedding = tf.nn.embedding_lookup( + params=self.item_lookup, ids=self.involved_items + ) + self.embed_params.append(involved_item_embedding) + + involved_cates = tf.concat( + [ + tf.reshape(self.iterator.item_cate_history, [-1]), + tf.reshape(self.iterator.cates, [-1]), + ], + -1, + ) + self.involved_cates, _ = tf.unique(involved_cates) + involved_cate_embedding = tf.nn.embedding_lookup( + params=self.cate_lookup, ids=self.involved_cates + ) + self.embed_params.append(involved_cate_embedding) + + self.target_item_embedding = tf.concat( + [self.item_embedding, self.cate_embedding], -1 + ) + tf.compat.v1.summary.histogram( + "target_item_embedding_output", self.target_item_embedding + ) + + def _add_norm(self): + """Regularization for embedding variables and other variables.""" + all_variables, embed_variables = ( + tf.compat.v1.trainable_variables(), + tf.compat.v1.trainable_variables( + self.sequential_scope._name + "/embedding" + ), + ) + layer_params = list(set(all_variables) - set(embed_variables)) + layer_params = [a for a in layer_params if "_no_reg" not in a.name] + self.layer_params.extend(layer_params)
\ No newline at end of file
diff --git a/_modules/recommenders/models/deeprec/models/sequential/sli_rec.html b/_modules/recommenders/models/deeprec/models/sequential/sli_rec.html
new file mode 100644
index 0000000000..6b52bc72b4
--- /dev/null
+++ b/_modules/recommenders/models/deeprec/models/sequential/sli_rec.html
@@ -0,0 +1,525 @@
Source code for recommenders.models.deeprec.models.sequential.sli_rec

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import tensorflow as tf
+from recommenders.models.deeprec.models.sequential.sequential_base_model import (
+    SequentialBaseModel,
+)
+from tensorflow.compat.v1.nn import dynamic_rnn
+from recommenders.models.deeprec.models.sequential.rnn_cell_implement import (
+    Time4LSTMCell,
+)
+
+__all__ = ["SLI_RECModel"]
+
+
+
+class SLI_RECModel(SequentialBaseModel):
+    """SLI-Rec model
+
+    :Citation:
+
+        Z. Yu, J. Lian, A. Mahmoody, G. Liu and X. Xie, "Adaptive User Modeling with
+        Long and Short-Term Preferences for Personalized Recommendation", in Proceedings of
+        the 28th International Joint Conference on Artificial Intelligence, IJCAI'19,
+        Pages 4213-4219, AAAI Press, 2019.
+    """
+
+    def _build_seq_graph(self):
+        """The main function to create the SLI-Rec model.
+
+        Returns:
+            object: The output of the SLI-Rec section.
+        """
+        hparams = self.hparams
+        with tf.compat.v1.variable_scope("sli_rec"):
+            hist_input = tf.concat(
+                [self.item_history_embedding, self.cate_history_embedding], 2
+            )
+            self.mask = self.iterator.mask
+            self.sequence_length = tf.reduce_sum(input_tensor=self.mask, axis=1)
+
+            with tf.compat.v1.variable_scope("long_term_asvd"):
+                att_outputs1 = self._attention(hist_input, hparams.attention_size)
+                att_fea1 = tf.reduce_sum(input_tensor=att_outputs1, axis=1)
+                tf.compat.v1.summary.histogram("att_fea1", att_fea1)
+
+            item_history_embedding_new = tf.concat(
+                [
+                    self.item_history_embedding,
+                    tf.expand_dims(self.iterator.time_from_first_action, -1),
+                ],
+                -1,
+            )
+            item_history_embedding_new = tf.concat(
+                [
+                    item_history_embedding_new,
+                    tf.expand_dims(self.iterator.time_to_now, -1),
+                ],
+                -1,
+            )
+            with tf.compat.v1.variable_scope("rnn"):
+                rnn_outputs, _ = dynamic_rnn(
+                    Time4LSTMCell(hparams.hidden_size),
+                    inputs=item_history_embedding_new,
+                    sequence_length=self.sequence_length,
+                    dtype=tf.float32,
+                    scope="time4lstm",
+                )
+                tf.compat.v1.summary.histogram("LSTM_outputs", rnn_outputs)
+
+            with tf.compat.v1.variable_scope("attention_fcn"):
+                att_outputs2 = self._attention_fcn(
+                    self.target_item_embedding, rnn_outputs
+                )
+                att_fea2 = tf.reduce_sum(input_tensor=att_outputs2, axis=1)
+                tf.compat.v1.summary.histogram("att_fea2", att_fea2)
+
+            # ensemble
+            with tf.compat.v1.name_scope("alpha"):
+                concat_all = tf.concat(
+                    [
+                        self.target_item_embedding,
+                        att_fea1,
+                        att_fea2,
+                        tf.expand_dims(self.iterator.time_to_now[:, -1], -1),
+                    ],
+                    1,
+                )
+                last_hidden_nn_layer = concat_all
+                alpha_logit = self._fcn_net(
+                    last_hidden_nn_layer, hparams.att_fcn_layer_sizes, scope="fcn_alpha"
+                )
+                alpha_output = tf.sigmoid(alpha_logit)
+                user_embed = att_fea1 * alpha_output + att_fea2 * (1.0 - alpha_output)
+            model_output = tf.concat([user_embed, self.target_item_embedding], 1)
+            tf.compat.v1.summary.histogram("model_output", model_output)
+            return model_output
+
+    def _attention_fcn(self, query, user_embedding):
+        """Apply attention by fully connected layers.
+
+        Args:
+            query (object): The embedding of the target item, which is regarded as a query in the attention operations.
+            user_embedding (object): The output of the RNN layers, which is regarded as user modeling.
+
+        Returns:
+            object: Weighted sum of user modeling.
+        """
+        hparams = self.hparams
+        with tf.compat.v1.variable_scope("attention_fcn"):
+            query_size = query.shape[1]
+            boolean_mask = tf.equal(self.mask, tf.ones_like(self.mask))
+
+            attention_mat = tf.compat.v1.get_variable(
+                name="attention_mat",
+                shape=[user_embedding.shape.as_list()[-1], query_size],
+                initializer=self.initializer,
+            )
+            att_inputs = tf.tensordot(user_embedding, attention_mat, [[2], [0]])
+
+            queries = tf.reshape(
+                tf.tile(query, [1, att_inputs.shape[1]]), tf.shape(input=att_inputs)
+            )
+            last_hidden_nn_layer = tf.concat(
+                [att_inputs, queries, att_inputs - queries, att_inputs * queries], -1
+            )
+            att_fnc_output = self._fcn_net(
+                last_hidden_nn_layer, hparams.att_fcn_layer_sizes, scope="att_fcn"
+            )
+            att_fnc_output = tf.squeeze(att_fnc_output, -1)
+            mask_paddings = tf.ones_like(att_fnc_output) * (-(2**32) + 1)
+            att_weights = tf.nn.softmax(
+                tf.compat.v1.where(boolean_mask, att_fnc_output, mask_paddings),
+                name="att_weights",
+            )
+            output = user_embedding * tf.expand_dims(att_weights, -1)
+            return output
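The ensemble step above is just a learned convex combination of the long- and short-term user vectors; a tiny NumPy illustration with made-up numbers:

import numpy as np

att_fea1 = np.array([0.2, 0.4])      # long-term (ASVD) user vector, made up
att_fea2 = np.array([0.8, 0.0])      # short-term (Time4LSTM) user vector, made up
alpha = 1.0 / (1.0 + np.exp(-0.5))   # sigmoid of a hypothetical fcn_alpha logit
user_embed = att_fea1 * alpha + att_fea2 * (1.0 - alpha)
print(user_embed)                    # leans toward att_fea1 whenever alpha > 0.5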
\ No newline at end of file
diff --git a/_modules/recommenders/models/deeprec/models/sequential/sum.html b/_modules/recommenders/models/deeprec/models/sequential/sum.html
new file mode 100644
index 0000000000..cf5592450e
--- /dev/null
+++ b/_modules/recommenders/models/deeprec/models/sequential/sum.html
@@ -0,0 +1,543 @@
Source code for recommenders.models.deeprec.models.sequential.sum

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import tensorflow as tf
+from tensorflow.compat.v1.nn import dynamic_rnn
+from recommenders.models.deeprec.models.sequential.sequential_base_model import (
+    SequentialBaseModel,
+)
+from recommenders.models.deeprec.models.sequential.sum_cells import (
+    SUMCell,
+    SUMV2Cell,
+)
+
+
+
+class SUMModel(SequentialBaseModel):
+    """Sequential User Matrix Model
+
+    :Citation:
+
+        Lian, J., Batal, I., Liu, Z., Soni, A., Kang, E. Y., Wang, Y., & Xie, X.,
+        "Multi-Interest-Aware User Modeling for Large-Scale Sequential Recommendations", arXiv preprint arXiv:2102.09211, 2021.
+    """
+
+    def _build_seq_graph(self):
+        """The main function to create the SUM model.
+
+        Returns:
+            object: The output of the SUM section, which is a concatenation of the user vector and the target item vector.
+        """
+        hparams = self.hparams  # noqa: F841
+        with tf.compat.v1.variable_scope("sum"):
+            self.history_embedding = tf.concat(
+                [self.item_history_embedding, self.cate_history_embedding], 2
+            )
+            cell = self._create_sumcell()
+            self.cell = cell
+            cell.model = self
+            final_state = self._build_sum(cell)
+
+            for _p in cell.parameter_set:
+                tf.compat.v1.summary.histogram(_p.name, _p)
+            if hasattr(cell, "_alpha") and hasattr(cell._alpha, "name"):
+                tf.compat.v1.summary.histogram(cell._alpha.name, cell._alpha)
+            if hasattr(cell, "_beta") and hasattr(cell._beta, "name"):
+                tf.compat.v1.summary.histogram(cell._beta.name, cell._beta)
+
+            final_state, att_weights = self._attention_query_by_state(
+                final_state, self.target_item_embedding
+            )
+            model_output = tf.concat([final_state, self.target_item_embedding], 1)
+            tf.compat.v1.summary.histogram("model_output", model_output)
+            return model_output
+
+    def _attention_query_by_state(self, seq_output, query):
+        """Merge a user's memory states conditioned on a query item.
+
+        Args:
+            seq_output: A flattened representation of the SUM memory states for (a batch of) users.
+            query: (A batch of) target item candidates.
+
+        Returns:
+            tf.Tensor, tf.Tensor: Merged user representation. Attention weights of each memory channel.
+        """
+        dim_q = query.shape[-1]
+        att_weights = tf.constant(1.0, dtype=tf.float32)
+        with tf.compat.v1.variable_scope("query_att"):
+            if self.hparams.slots > 1:
+                query_att_W = tf.compat.v1.get_variable(
+                    name="query_att_W",
+                    shape=[self.hidden_size, dim_q],
+                    initializer=self.initializer,
+                )
+
+                # reshape the memory states to (BatchSize, Slots, HiddenSize)
+                memory_state = tf.reshape(
+                    seq_output, [-1, self.hparams.slots, self.hidden_size]
+                )
+
+                att_weights = tf.nn.softmax(
+                    tf.squeeze(
+                        tf.matmul(
+                            tf.tensordot(memory_state, query_att_W, axes=1),
+                            tf.expand_dims(query, -1),
+                        ),
+                        -1,
+                    ),
+                    -1,
+                )
+                # merge the memory states, the final shape is (BatchSize, HiddenSize)
+                att_res = tf.reduce_sum(
+                    input_tensor=memory_state * tf.expand_dims(att_weights, -1), axis=1
+                )
+
+            else:
+                att_res = seq_output
+
+        return att_res, att_weights
+
+    def _create_sumcell(self):
+        """Create a SUM cell.
+
+        Returns:
+            object: An initialized SUM cell.
+        """
+        hparams = self.hparams
+        input_embedding_dim = self.history_embedding.shape[-1]
+        input_params = [
+            hparams.hidden_size * hparams.slots + input_embedding_dim,
+            hparams.slots,
+            hparams.attention_size,
+            input_embedding_dim,
+        ]
+        sumcells = {"SUM": SUMCell, "SUMV2": SUMV2Cell}
+        if hparams.cell in sumcells:
+            res = sumcells[hparams.cell](*input_params)
+        else:
+            # unknown cell types now raise ValueError instead of an opaque KeyError
+            raise ValueError("ERROR! Cell type not supported: {0}".format(hparams.cell))
+        return res
+
+    def _build_sum(self, cell):
+        """Generate user memory states from the behavior sequence.
+
+        Args:
+            cell (object): An initialized SUM cell.
+
+        Returns:
+            object: A flattened representation of the user memory states, in the shape of (BatchSize, SlotsNum x HiddenSize).
+        """
+        hparams = self.hparams
+        with tf.compat.v1.variable_scope("sum"):
+            self.mask = self.iterator.mask
+            self.sequence_length = tf.reduce_sum(input_tensor=self.mask, axis=1)
+
+            rum_outputs, final_state = dynamic_rnn(
+                cell,
+                inputs=self.history_embedding,
+                dtype=tf.float32,
+                sequence_length=self.sequence_length,
+                scope="sum",
+                initial_state=cell.zero_state(
+                    tf.shape(input=self.history_embedding)[0], tf.float32
+                ),
+            )
+
+            final_state = final_state[:, : hparams.slots * hparams.hidden_size]
+
+            self.heads = cell.heads
+            self.alpha = cell._alpha
+            self.beta = cell._beta
+            tf.compat.v1.summary.histogram("SUM_outputs", rum_outputs)
+
+        return final_state
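Shape bookkeeping for SUM under hypothetical hyperparameters: the flat RNN state packs `slots` memory channels of width `hidden_size` (the last channel acting as a highway channel) plus a copy of the most recent input, which the cell uses for its recency discount. The numbers below are made up for illustration:

slots, hidden_size, input_dim = 4, 40, 32          # made-up values
num_units = hidden_size * slots + input_dim        # first element of input_params
real_units = (num_units - input_dim) // slots      # SUMCell._real_units
assert real_units == hidden_size
print(num_units)                # 192: size of cell.zero_state per example
print(slots * hidden_size)      # 160: kept by final_state[:, : slots * hidden_size]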
\ No newline at end of file
diff --git a/_modules/recommenders/models/deeprec/models/sequential/sum_cells.html b/_modules/recommenders/models/deeprec/models/sequential/sum_cells.html
new file mode 100644
index 0000000000..73344a8c4a
--- /dev/null
+++ b/_modules/recommenders/models/deeprec/models/sequential/sum_cells.html
@@ -0,0 +1,770 @@
Source code for recommenders.models.deeprec.models.sequential.sum_cells

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import numpy as np
+import tensorflow as tf
+from keras.layers.legacy_rnn.rnn_cell_impl import LayerRNNCell
+from tensorflow.python.eager import context
+from tensorflow.python.keras import activations
+from tensorflow.python.keras import initializers
+from tensorflow.python.keras.utils import tf_utils
+from tensorflow.python.ops import math_ops
+from tensorflow.python.platform import tf_logging as logging
+from tensorflow.python.ops import init_ops
+from tensorflow.python.framework import dtypes
+from tensorflow.python.util import nest
+
+
+_BIAS_VARIABLE_NAME = "bias"
+_WEIGHTS_VARIABLE_NAME = "kernel"
+
+
+
[docs]class SUMCell(LayerRNNCell): + """Cell for Sequential User Matrix""" + + def __init__( + self, + num_units, + slots, + attention_size, + input_size, + activation=None, + reuse=None, + kernel_initializer=None, + bias_initializer=None, + name=None, + dtype=None, + **kwargs + ): + super(SUMCell, self).__init__(_reuse=reuse, name=name, dtype=dtype, **kwargs) + _check_supported_dtypes(self.dtype) + + if context.executing_eagerly() and context.num_gpus() > 0: + logging.warn( + "%s: Note that this cell is not optimized for performance. " + "Please use keras.layers.cudnn_recurrent.CuDNNGRU for better " + "performance on GPU.", + self, + ) + + self._input_size = input_size + self._slots = slots - 1 # the last channel is reserved for the highway slot + self._num_units = num_units + self._real_units = (self._num_units - input_size) // slots + if activation: + self._activation = activations.get(activation) + else: + self._activation = math_ops.tanh + self._kernel_initializer = initializers.get(kernel_initializer) + self._bias_initializer = initializers.get(bias_initializer) + + @property + def state_size(self): + return self._num_units + + @property + def output_size(self): + return self._num_units + + def _basic_build(self, inputs_shape): + """Common initialization operations for SUM cell and its variants. + This function creates parameters for the cell. + """ + + d = inputs_shape[-1] + h = self._real_units + s = self._slots + + self._erase_W = self.add_variable( + name="_erase_W", shape=[d + h, h], initializer=self._kernel_initializer + ) + self._erase_b = self.add_variable( + name="_erase_b", + shape=[h], + initializer=( + self._bias_initializer + if self._bias_initializer is not None + else init_ops.constant_initializer(1.0, dtype=self.dtype) + ), + ) + + self._reset_W = self.add_variable( + name="_reset_W", shape=[d + h, 1], initializer=self._kernel_initializer + ) + self._reset_b = self.add_variable( + name="_reset_b", + shape=[1], + initializer=( + self._bias_initializer + if self._bias_initializer is not None + else init_ops.constant_initializer(1.0, dtype=self.dtype) + ), + ) + + self._add_W = self.add_variable( + name="_add_W", shape=[d + h, h], initializer=self._kernel_initializer + ) + self._add_b = self.add_variable( + name="_add_b", + shape=[h], + initializer=( + self._bias_initializer + if self._bias_initializer is not None + else init_ops.constant_initializer(1.0, dtype=self.dtype) + ), + ) + self.heads = self.add_variable( + name="_heads", shape=[s, d], initializer=self._kernel_initializer + ) + + self._beta = self.add_variable( + name="_beta_no_reg", + shape=(), + initializer=tf.compat.v1.constant_initializer( + np.array([1.02]), dtype=np.float32 + ), + ) + self._alpha = self.add_variable( + name="_alpha_no_reg", + shape=(), + initializer=tf.compat.v1.constant_initializer( + np.array([0.98]), dtype=np.float32 + ), + ) + + @tf_utils.shape_type_conversion + def build(self, inputs_shape): + """Initialization operations for SUM cell. + this function creates all the parameters for the cell. + """ + if inputs_shape[-1] is None: + raise ValueError( + "Expected inputs.shape[-1] to be known, saw shape: %s" + % str(inputs_shape) + ) + _check_supported_dtypes(self.dtype) + d = inputs_shape[-1] # noqa: F841 + h = self._real_units # noqa: F841 + s = self._slots # noqa: F841 + + self._basic_build(inputs_shape) + + self.parameter_set = [ + self._erase_W, + self._erase_b, + self._reset_W, + self._reset_b, + self._add_W, + self._add_b, + self.heads, + ] + + self.built = True + +
+    def call(self, inputs, state):
+        """The operations that the SUM cell performs to process user behaviors.
+
+        Args:
+            inputs: (A batch of) user behaviors at time T.
+            state: (A batch of) user states at time T-1.
+
+        Returns:
+            state, state: The updated (batch of) user states at time T, returned
+            twice so the cell output and the carried state are identical.
+        """
+        _check_rnn_cell_input_dtypes([inputs, state])
+
+        h = self._real_units
+        s = self._slots + 1
+        state, last = state[:, : s * h], state[:, s * h :]
+        state = tf.reshape(state, [-1, s, h])
+
+        att_logit_mat = tf.matmul(inputs, self.heads, transpose_b=True)
+
+        att_weights = tf.nn.softmax(self._beta * att_logit_mat, axis=-1)
+        att_weights = tf.expand_dims(att_weights, 2)
+
+        h_hat = tf.reduce_sum(
+            input_tensor=tf.multiply(state[:, : self._slots, :], att_weights), axis=1
+        )
+        h_hat = (h_hat + state[:, self._slots, :]) / 2
+
+        n_a, n_b = tf.nn.l2_normalize(last, 1), tf.nn.l2_normalize(inputs, 1)
+        dist = tf.expand_dims(tf.reduce_sum(input_tensor=n_a * n_b, axis=1), 1)
+        dist = tf.math.pow(self._alpha, dist)
+
+        att_weights = att_weights * tf.expand_dims(dist, 1)
+
+        reset = tf.sigmoid(
+            tf.compat.v1.nn.xw_plus_b(
+                tf.concat([inputs, h_hat], axis=-1), self._reset_W, self._reset_b
+            )
+        )
+        erase = tf.sigmoid(
+            tf.compat.v1.nn.xw_plus_b(
+                tf.concat([inputs, h_hat], axis=-1), self._erase_W, self._erase_b
+            )
+        )
+        add = tf.tanh(
+            tf.compat.v1.nn.xw_plus_b(
+                tf.concat([inputs, reset * h_hat], axis=-1), self._add_W, self._add_b
+            )
+        )
+
+        start_part01 = state[:, : self._slots, :]
+        state01 = start_part01 * (
+            tf.ones_like(start_part01) - att_weights * tf.expand_dims(erase, 1)
+        )
+        state01 = state01 + att_weights * tf.expand_dims(erase, 1) * tf.expand_dims(
+            add, 1
+        )
+        state01 = tf.reshape(state01, [-1, self._slots * self._real_units])
+
+        start_part02 = state[:, self._slots, :]
+        state02 = start_part02 * (tf.ones_like(start_part02) - dist * erase)
+        state02 = state02 + dist * erase * add
+        state = tf.concat([state01, state02, inputs], axis=-1)
+        return state, state
+
+
[docs] def get_config(self): + config = { + "num_units": self._num_units, + "kernel_initializer": initializers.serialize(self._kernel_initializer), + "bias_initializer": initializers.serialize(self._bias_initializer), + "activation": activations.serialize(self._activation), + "reuse": self._reuse, + } + base_config = super(SUMCell, self).get_config() + return dict(list(base_config.items()) + list(config.items()))
+
+
+
[docs]class SUMV2Cell(SUMCell): + """A variant of SUM cell, which upgrades the writing attention""" + + @tf_utils.shape_type_conversion + def build(self, inputs_shape): + """Initialization operations for SUMV2 cell. + this function creates all the parameters for the cell. + """ + if inputs_shape[-1] is None: + raise ValueError( + "Expected inputs.shape[-1] to be known, saw shape: %s" + % str(inputs_shape) + ) + _check_supported_dtypes(self.dtype) + d = inputs_shape[-1] + h = self._real_units + s = self._slots + + self._basic_build(inputs_shape) + + self._writing_W = self.add_variable( + name="_writing_W", shape=[d + h, h], initializer=self._kernel_initializer + ) + self._writing_b = self.add_variable( + name="_writing_b", + shape=[h], + initializer=( + self._bias_initializer + if self._bias_initializer is not None + else init_ops.constant_initializer(1.0, dtype=self.dtype) + ), + ) + self._writing_W02 = self.add_variable( + name="_writing_W02", shape=[h, s], initializer=self._kernel_initializer + ) + + self.parameter_set = [ + self._erase_W, + self._erase_b, + self._reset_W, + self._reset_b, + self._add_W, + self._add_b, + self.heads, + self._writing_W, + self._writing_W02, + self._writing_b, + ] + + self.built = True + +
+    def call(self, inputs, state):
+        """The operations that the SUMV2 cell performs to process user behaviors.
+
+        Args:
+            inputs: (A batch of) user behaviors at time T.
+            state: (A batch of) user states at time T-1.
+
+        Returns:
+            state, state: The updated (batch of) user states at time T, returned
+            twice so the cell output and the carried state are identical.
+        """
+        _check_rnn_cell_input_dtypes([inputs, state])
+
+        h = self._real_units
+        s = self._slots + 1
+        state, last = state[:, : s * h], state[:, s * h :]
+        state = tf.reshape(state, [-1, s, h])
+
+        att_logit_mat = tf.matmul(inputs, self.heads, transpose_b=True)
+
+        att_weights = tf.nn.softmax(self._beta * att_logit_mat, axis=-1)
+        att_weights = tf.expand_dims(att_weights, 2)
+
+        h_hat = tf.reduce_sum(
+            input_tensor=tf.multiply(state[:, : self._slots, :], att_weights), axis=1
+        )
+        h_hat = (h_hat + state[:, self._slots, :]) / 2
+
+        # get the true writing attentions
+        writing_input = tf.concat([inputs, h_hat], axis=1)
+        att_weights = tf.compat.v1.nn.xw_plus_b(
+            writing_input, self._writing_W, self._writing_b
+        )
+        att_weights = tf.nn.relu(att_weights)
+        att_weights = tf.matmul(att_weights, self._writing_W02)
+        att_weights = tf.nn.softmax(att_weights, axis=-1)
+        att_weights = tf.expand_dims(att_weights, 2)
+
+        n_a, n_b = tf.nn.l2_normalize(last, 1), tf.nn.l2_normalize(inputs, 1)
+        dist = tf.expand_dims(tf.reduce_sum(input_tensor=n_a * n_b, axis=1), 1)
+        dist = tf.math.pow(self._alpha, dist)
+
+        att_weights = att_weights * tf.expand_dims(dist, 1)
+
+        reset = tf.sigmoid(
+            tf.compat.v1.nn.xw_plus_b(
+                tf.concat([inputs, h_hat], axis=-1), self._reset_W, self._reset_b
+            )
+        )
+        erase = tf.sigmoid(
+            tf.compat.v1.nn.xw_plus_b(
+                tf.concat([inputs, h_hat], axis=-1), self._erase_W, self._erase_b
+            )
+        )
+        add = tf.tanh(
+            tf.compat.v1.nn.xw_plus_b(
+                tf.concat([inputs, reset * h_hat], axis=-1), self._add_W, self._add_b
+            )
+        )
+
+        start_part01 = state[:, : self._slots, :]
+        state01 = start_part01 * (
+            tf.ones_like(start_part01) - att_weights * tf.expand_dims(erase, 1)
+        )
+        state01 = state01 + att_weights * tf.expand_dims(erase, 1) * tf.expand_dims(
+            add, 1
+        )
+        state01 = tf.reshape(state01, [-1, self._slots * self._real_units])
+
+        start_part02 = state[:, self._slots, :]
+        state02 = start_part02 * (tf.ones_like(start_part02) - dist * erase)
+        state02 = state02 + dist * erase * add
+        state = tf.concat([state01, state02, inputs], axis=-1)
+        return state, state
+ + +def _check_rnn_cell_input_dtypes(inputs): + for t in nest.flatten(inputs): + _check_supported_dtypes(t.dtype) + + +def _check_supported_dtypes(dtype): + if dtype is None: + return + dtype = dtypes.as_dtype(dtype) + if not (dtype.is_floating or dtype.is_complex): + raise ValueError( + "RNN cell only supports floating point inputs, " "but saw dtype: %s" % dtype + ) +
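The recency discount used in both cells can be read in isolation; a NumPy sketch of the `tf.math.pow(self._alpha, dist)` step with made-up behavior vectors:

import numpy as np

alpha = 0.98                        # _alpha is initialized near 0.98
last = np.array([1.0, 0.0])         # previous behavior embedding, made up
cur = np.array([0.8, 0.6])          # current behavior embedding, made up
cos = last @ cur / (np.linalg.norm(last) * np.linalg.norm(cur))
dist = alpha ** cos                 # ~0.984 here: with alpha < 1, more similar
print(round(float(dist), 4))        # consecutive behaviors get a weaker write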
\ No newline at end of file
diff --git a/_modules/recommenders/models/deeprec/models/xDeepFM.html b/_modules/recommenders/models/deeprec/models/xDeepFM.html
new file mode 100644
index 0000000000..78917ac084
--- /dev/null
+++ b/_modules/recommenders/models/deeprec/models/xDeepFM.html
@@ -0,0 +1,923 @@
Source code for recommenders.models.deeprec.models.xDeepFM

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import numpy as np
+import tensorflow as tf
+
+from recommenders.models.deeprec.models.base_model import BaseModel
+
+
+__all__ = ["XDeepFMModel"]
+
+
+
[docs]class XDeepFMModel(BaseModel): + """xDeepFM model + + :Citation: + + J. Lian, X. Zhou, F. Zhang, Z. Chen, X. Xie, G. Sun, "xDeepFM: Combining Explicit + and Implicit Feature Interactions for Recommender Systems", in Proceedings of the + 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, + KDD 2018, London, 2018. + """ + + def _build_graph(self): + """The main function to create xdeepfm's logic. + + Returns: + object: The prediction score made by the model. + """ + hparams = self.hparams + self.keep_prob_train = 1 - np.array(hparams.dropout) + self.keep_prob_test = np.ones_like(hparams.dropout) + + with tf.compat.v1.variable_scope("XDeepFM") as scope: # noqa: F841 + with tf.compat.v1.variable_scope( + "embedding", initializer=self.initializer + ) as escope: # noqa: F841 + self.embedding = tf.compat.v1.get_variable( + name="embedding_layer", + shape=[hparams.FEATURE_COUNT, hparams.dim], + dtype=tf.float32, + ) + self.embed_params.append(self.embedding) + embed_out, embed_layer_size = self._build_embedding() + + logit = 0 + + if hparams.use_Linear_part: + print("Add linear part.") + logit = logit + self._build_linear() + + if hparams.use_FM_part: + print("Add FM part.") + logit = logit + self._build_fm() + + if hparams.use_CIN_part: + print("Add CIN part.") + if hparams.fast_CIN_d <= 0: + logit = logit + self._build_CIN( + embed_out, res=True, direct=False, bias=False, is_masked=True + ) + else: + logit = logit + self._build_fast_CIN( + embed_out, res=True, direct=False, bias=False + ) + + if hparams.use_DNN_part: + print("Add DNN part.") + logit = logit + self._build_dnn(embed_out, embed_layer_size) + + return logit + + def _build_embedding(self): + """The field embedding layer. MLP requires fixed-length vectors as input. + This function makes sum pooling of feature embeddings for each field. + + Returns: + embedding: The result of field embedding layer, with size of #_fields * #_dim. + embedding_size: #_fields * #_dim + """ + hparams = self.hparams + fm_sparse_index = tf.SparseTensor( + self.iterator.dnn_feat_indices, + self.iterator.dnn_feat_values, + self.iterator.dnn_feat_shape, + ) + fm_sparse_weight = tf.SparseTensor( + self.iterator.dnn_feat_indices, + self.iterator.dnn_feat_weights, + self.iterator.dnn_feat_shape, + ) + w_fm_nn_input_orgin = tf.nn.embedding_lookup_sparse( + params=self.embedding, + sp_ids=fm_sparse_index, + sp_weights=fm_sparse_weight, + combiner="sum", + ) + embedding = tf.reshape( + w_fm_nn_input_orgin, [-1, hparams.dim * hparams.FIELD_COUNT] + ) + embedding_size = hparams.FIELD_COUNT * hparams.dim + return embedding, embedding_size + + def _build_linear(self): + """Construct the linear part for the model. + This is a linear regression. + + Returns: + object: Prediction score made by linear regression. 
+        """
+        with tf.compat.v1.variable_scope(
+            "linear_part", initializer=self.initializer
+        ) as scope:  # noqa: F841
+            w = tf.compat.v1.get_variable(
+                name="w", shape=[self.hparams.FEATURE_COUNT, 1], dtype=tf.float32
+            )
+            b = tf.compat.v1.get_variable(
+                name="b",
+                shape=[1],
+                dtype=tf.float32,
+                initializer=tf.compat.v1.zeros_initializer(),
+            )
+            x = tf.SparseTensor(
+                self.iterator.fm_feat_indices,
+                self.iterator.fm_feat_values,
+                self.iterator.fm_feat_shape,
+            )
+            linear_output = tf.add(tf.sparse.sparse_dense_matmul(x, w), b)
+            self.layer_params.append(w)
+            self.layer_params.append(b)
+            tf.compat.v1.summary.histogram("linear_part/w", w)
+            tf.compat.v1.summary.histogram("linear_part/b", b)
+            return linear_output
+
+    def _build_fm(self):
+        """Construct the factorization machine part of the model.
+        This is a traditional second-order FM module.
+
+        Returns:
+            object: Prediction score made by the factorization machine.
+        """
+        with tf.compat.v1.variable_scope("fm_part") as scope:  # noqa: F841
+            x = tf.SparseTensor(
+                self.iterator.fm_feat_indices,
+                self.iterator.fm_feat_values,
+                self.iterator.fm_feat_shape,
+            )
+            xx = tf.SparseTensor(
+                self.iterator.fm_feat_indices,
+                tf.pow(self.iterator.fm_feat_values, 2),
+                self.iterator.fm_feat_shape,
+            )
+            fm_output = 0.5 * tf.reduce_sum(
+                input_tensor=tf.pow(tf.sparse.sparse_dense_matmul(x, self.embedding), 2)
+                - tf.sparse.sparse_dense_matmul(xx, tf.pow(self.embedding, 2)),
+                axis=1,
+                keepdims=True,
+            )
+            return fm_output
+
+    def _build_CIN(
+        self, nn_input, res=False, direct=False, bias=False, is_masked=False
+    ):
+        """Construct the compressed interaction network.
+        This component provides explicit and vector-wise higher-order feature interactions.
+
+        Args:
+            nn_input (object): The output of the field-embedding layer. This is the input for CIN.
+            res (bool): Whether to use a residual structure to fuse the results from each layer of CIN.
+            direct (bool): If true, then all hidden units are connected to both the next layer and the output layer;
+                otherwise, half of the hidden units are connected to the next layer and the other half are connected to the output layer.
+            bias (bool): Whether to add a bias term when calculating the feature maps.
+            is_masked (bool): Controls whether to remove self-interaction in the first layer of CIN.
+
+        Returns:
+            object: Prediction score made by CIN.
+ """ + hparams = self.hparams + hidden_nn_layers = [] + field_nums = [] + final_len = 0 + field_num = hparams.FIELD_COUNT + nn_input = tf.reshape(nn_input, shape=[-1, int(field_num), hparams.dim]) + field_nums.append(int(field_num)) + hidden_nn_layers.append(nn_input) + final_result = [] + split_tensor0 = tf.split(hidden_nn_layers[0], hparams.dim * [1], 2) + with tf.compat.v1.variable_scope( + "exfm_part", initializer=self.initializer + ) as scope: # noqa: F841 + for idx, layer_size in enumerate(hparams.cross_layer_sizes): + split_tensor = tf.split(hidden_nn_layers[-1], hparams.dim * [1], 2) + dot_result_m = tf.matmul( + split_tensor0, split_tensor, transpose_b=True + ) # shape : (Dim, Batch, FieldNum, HiddenNum), a.k.a (D,B,F,H) + dot_result_o = tf.reshape( + dot_result_m, + shape=[hparams.dim, -1, field_nums[0] * field_nums[-1]], + ) # shape: (D,B,FH) + dot_result = tf.transpose(a=dot_result_o, perm=[1, 0, 2]) # (B,D,FH) + + filters = tf.compat.v1.get_variable( + name="f_" + str(idx), + shape=[1, field_nums[-1] * field_nums[0], layer_size], + dtype=tf.float32, + ) + + if is_masked and idx == 0: + ones = tf.ones([field_nums[0], field_nums[0]], dtype=tf.float32) + mask_matrix = tf.linalg.band_part( + ones, 0, -1 + ) - tf.linalg.tensor_diag(tf.ones(field_nums[0])) + mask_matrix = tf.reshape( + mask_matrix, shape=[1, field_nums[0] * field_nums[0]] + ) + + dot_result = tf.multiply(dot_result, mask_matrix) * 2 + self.dot_result = dot_result + + curr_out = tf.nn.conv1d( + input=dot_result, filters=filters, stride=1, padding="VALID" + ) # shape : (B,D,H`) + + if bias: + b = tf.compat.v1.get_variable( + name="f_b" + str(idx), + shape=[layer_size], + dtype=tf.float32, + initializer=tf.compat.v1.zeros_initializer(), + ) + curr_out = tf.nn.bias_add(curr_out, b) + self.cross_params.append(b) + + if hparams.enable_BN is True: + curr_out = tf.compat.v1.layers.batch_normalization( + curr_out, + momentum=0.95, + epsilon=0.0001, + training=self.is_train_stage, + ) + + curr_out = self._activate(curr_out, hparams.cross_activation) + + curr_out = tf.transpose(a=curr_out, perm=[0, 2, 1]) # shape : (B,H,D) + + if direct: + direct_connect = curr_out + next_hidden = curr_out + final_len += layer_size + field_nums.append(int(layer_size)) + + else: + if idx != len(hparams.cross_layer_sizes) - 1: + next_hidden, direct_connect = tf.split( + curr_out, 2 * [int(layer_size / 2)], 1 + ) + final_len += int(layer_size / 2) + else: + direct_connect = curr_out + next_hidden = 0 + final_len += layer_size + field_nums.append(int(layer_size / 2)) + + final_result.append(direct_connect) + hidden_nn_layers.append(next_hidden) + + self.cross_params.append(filters) + + result = tf.concat(final_result, axis=1) + result = tf.reduce_sum(input_tensor=result, axis=-1) # shape : (B,H) + + if res: + base_score = tf.reduce_sum( + input_tensor=result, axis=1, keepdims=True + ) # (B,1) + else: + base_score = 0 + + w_nn_output = tf.compat.v1.get_variable( + name="w_nn_output", shape=[final_len, 1], dtype=tf.float32 + ) + b_nn_output = tf.compat.v1.get_variable( + name="b_nn_output", + shape=[1], + dtype=tf.float32, + initializer=tf.compat.v1.zeros_initializer(), + ) + self.layer_params.append(w_nn_output) + self.layer_params.append(b_nn_output) + exFM_out = base_score + tf.compat.v1.nn.xw_plus_b( + result, w_nn_output, b_nn_output + ) + return exFM_out + + def _build_fast_CIN(self, nn_input, res=False, direct=False, bias=False): + """Construct the compressed interaction network with reduced parameters. 
+        This component provides explicit and vector-wise higher-order feature interactions.
+        Parameters of the filters are reduced via a matrix decomposition method.
+        Fast CIN is more space and time efficient than CIN.
+
+        Args:
+            nn_input (object): The output of the field-embedding layer. This is the input for CIN.
+            res (bool): Whether to use a residual structure to fuse the results from each layer of CIN.
+            direct (bool): If true, then all hidden units are connected to both the next layer and the output layer;
+                otherwise, half of the hidden units are connected to the next layer and the other half are connected to the output layer.
+            bias (bool): Whether to add a bias term when calculating the feature maps.
+
+        Returns:
+            object: Prediction score made by fast CIN.
+        """
+        hparams = self.hparams
+        hidden_nn_layers = []
+        field_nums = []
+        final_len = 0
+        field_num = hparams.FIELD_COUNT
+        fast_CIN_d = hparams.fast_CIN_d
+        nn_input = tf.reshape(
+            nn_input, shape=[-1, int(field_num), hparams.dim]
+        )  # (B,F,D)
+        nn_input = tf.transpose(a=nn_input, perm=[0, 2, 1])  # (B,D,F)
+        field_nums.append(int(field_num))
+        hidden_nn_layers.append(nn_input)
+        final_result = []
+        with tf.compat.v1.variable_scope(
+            "exfm_part", initializer=self.initializer
+        ) as scope:  # noqa: F841
+            for idx, layer_size in enumerate(hparams.cross_layer_sizes):
+                if idx == 0:
+                    fast_w = tf.compat.v1.get_variable(
+                        "fast_CIN_w_" + str(idx),
+                        shape=[1, field_nums[0], fast_CIN_d * layer_size],
+                        dtype=tf.float32,
+                    )
+
+                    self.cross_params.append(fast_w)
+                    dot_result_1 = tf.nn.conv1d(
+                        input=nn_input, filters=fast_w, stride=1, padding="VALID"
+                    )  # shape: (B,D,d*H)
+                    dot_result_2 = tf.nn.conv1d(
+                        input=tf.pow(nn_input, 2),
+                        filters=tf.pow(fast_w, 2),
+                        stride=1,
+                        padding="VALID",
+                    )  # shape: (B,D,d*H)
+                    dot_result = tf.reshape(
+                        0.5 * (dot_result_1 - dot_result_2),
+                        shape=[-1, hparams.dim, layer_size, fast_CIN_d],
+                    )
+                    curr_out = tf.reduce_sum(
+                        input_tensor=dot_result, axis=3, keepdims=False
+                    )  # shape: (B,D,H)
+                else:
+                    fast_w = tf.compat.v1.get_variable(
+                        "fast_CIN_w_" + str(idx),
+                        shape=[1, field_nums[0], fast_CIN_d * layer_size],
+                        dtype=tf.float32,
+                    )
+                    fast_v = tf.compat.v1.get_variable(
+                        "fast_CIN_v_" + str(idx),
+                        shape=[1, field_nums[-1], fast_CIN_d * layer_size],
+                        dtype=tf.float32,
+                    )
+
+                    self.cross_params.append(fast_w)
+                    self.cross_params.append(fast_v)
+
+                    dot_result_1 = tf.nn.conv1d(
+                        input=nn_input, filters=fast_w, stride=1, padding="VALID"
+                    )  # shape: (B,D,d*H)
+                    dot_result_2 = tf.nn.conv1d(
+                        input=hidden_nn_layers[-1],
+                        filters=fast_v,
+                        stride=1,
+                        padding="VALID",
+                    )  # shape: (B,D,d*H)
+                    dot_result = tf.reshape(
+                        tf.multiply(dot_result_1, dot_result_2),
+                        shape=[-1, hparams.dim, layer_size, fast_CIN_d],
+                    )
+                    curr_out = tf.reduce_sum(
+                        input_tensor=dot_result, axis=3, keepdims=False
+                    )  # shape: (B,D,H)
+
+                if bias:
+                    b = tf.compat.v1.get_variable(
+                        name="f_b" + str(idx),
+                        shape=[1, 1, layer_size],
+                        dtype=tf.float32,
+                        initializer=tf.compat.v1.zeros_initializer(),
+                    )
+                    curr_out = tf.nn.bias_add(curr_out, b)
+                    self.cross_params.append(b)
+
+                if hparams.enable_BN is True:
+                    curr_out = tf.compat.v1.layers.batch_normalization(
+                        curr_out,
+                        momentum=0.95,
+                        epsilon=0.0001,
+                        training=self.is_train_stage,
+                    )
+
+                curr_out = self._activate(curr_out, hparams.cross_activation)
+
+                if direct:
+                    direct_connect = curr_out
+                    next_hidden = curr_out
+                    final_len += layer_size
+                    field_nums.append(int(layer_size))
+
+                else:
+                    if idx != len(hparams.cross_layer_sizes) - 1:
+                        next_hidden, direct_connect = tf.split(
+                            curr_out, 2 * [int(layer_size / 2)], 2
+                        )
+                        final_len += int(layer_size / 2)
+                        field_nums.append(int(layer_size / 2))
+                    else:
+                        direct_connect = curr_out
+                        next_hidden = 0
+                        final_len += layer_size
+                        field_nums.append(int(layer_size))
+
+                final_result.append(direct_connect)
+                hidden_nn_layers.append(next_hidden)
+
+            result = tf.concat(final_result, axis=2)
+            result = tf.reduce_sum(input_tensor=result, axis=1, keepdims=False)  # (B,H)
+
+            if res:
+                base_score = tf.reduce_sum(
+                    input_tensor=result, axis=1, keepdims=True
+                )  # (B,1)
+            else:
+                base_score = 0
+
+            w_nn_output = tf.compat.v1.get_variable(
+                name="w_nn_output", shape=[final_len, 1], dtype=tf.float32
+            )
+            b_nn_output = tf.compat.v1.get_variable(
+                name="b_nn_output",
+                shape=[1],
+                dtype=tf.float32,
+                initializer=tf.compat.v1.zeros_initializer(),
+            )
+            self.layer_params.append(w_nn_output)
+            self.layer_params.append(b_nn_output)
+            exFM_out = (
+                tf.compat.v1.nn.xw_plus_b(result, w_nn_output, b_nn_output) + base_score
+            )
+
+            return exFM_out
+
+    def _build_dnn(self, embed_out, embed_layer_size):
+        """Construct the MLP part of the model.
+        This component provides implicit higher-order feature interactions.
+
+        Args:
+            embed_out (object): The output of the field-embedding layer. This is the input for the DNN.
+            embed_layer_size (object): Shape of embed_out.
+
+        Returns:
+            object: Prediction score made by the DNN.
+        """
+        hparams = self.hparams
+        w_fm_nn_input = embed_out
+        last_layer_size = embed_layer_size
+        layer_idx = 0
+        hidden_nn_layers = []
+        hidden_nn_layers.append(w_fm_nn_input)
+        with tf.compat.v1.variable_scope(
+            "nn_part", initializer=self.initializer
+        ) as scope:
+            for idx, layer_size in enumerate(hparams.layer_sizes):
+                curr_w_nn_layer = tf.compat.v1.get_variable(
+                    name="w_nn_layer" + str(layer_idx),
+                    shape=[last_layer_size, layer_size],
+                    dtype=tf.float32,
+                )
+                curr_b_nn_layer = tf.compat.v1.get_variable(
+                    name="b_nn_layer" + str(layer_idx),
+                    shape=[layer_size],
+                    dtype=tf.float32,
+                    initializer=tf.compat.v1.zeros_initializer(),
+                )
+                tf.compat.v1.summary.histogram(
+                    "nn_part/" + "w_nn_layer" + str(layer_idx), curr_w_nn_layer
+                )
+                tf.compat.v1.summary.histogram(
+                    "nn_part/" + "b_nn_layer" + str(layer_idx), curr_b_nn_layer
+                )
+                curr_hidden_nn_layer = tf.compat.v1.nn.xw_plus_b(
+                    hidden_nn_layers[layer_idx], curr_w_nn_layer, curr_b_nn_layer
+                )
+                scope = "nn_part" + str(idx)  # noqa: F841
+                activation = hparams.activation[idx]
+
+                if hparams.enable_BN is True:
+                    curr_hidden_nn_layer = tf.compat.v1.layers.batch_normalization(
+                        curr_hidden_nn_layer,
+                        momentum=0.95,
+                        epsilon=0.0001,
+                        training=self.is_train_stage,
+                    )
+
+                curr_hidden_nn_layer = self._active_layer(
+                    logit=curr_hidden_nn_layer, activation=activation, layer_idx=idx
+                )
+                hidden_nn_layers.append(curr_hidden_nn_layer)
+                layer_idx += 1
+                last_layer_size = layer_size
+                self.layer_params.append(curr_w_nn_layer)
+                self.layer_params.append(curr_b_nn_layer)
+
+            w_nn_output = tf.compat.v1.get_variable(
+                name="w_nn_output", shape=[last_layer_size, 1], dtype=tf.float32
+            )
+            b_nn_output = tf.compat.v1.get_variable(
+                name="b_nn_output",
+                shape=[1],
+                dtype=tf.float32,
+                initializer=tf.compat.v1.zeros_initializer(),
+            )
+            tf.compat.v1.summary.histogram(
+                "nn_part/" + "w_nn_output" + str(layer_idx), w_nn_output
+            )
+            tf.compat.v1.summary.histogram(
+                "nn_part/" + "b_nn_output" + str(layer_idx), b_nn_output
+            )
+            self.layer_params.append(w_nn_output)
+            self.layer_params.append(b_nn_output)
+            nn_output = tf.compat.v1.nn.xw_plus_b(
+                hidden_nn_layers[-1], w_nn_output, b_nn_output
+            )
+            return nn_output
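Width bookkeeping for the CIN output layer, assuming a made-up `cross_layer_sizes` and `direct=False`: every non-final layer sends half of its feature maps to the output and passes the other half forward, while the final layer sends everything to the output.

cross_layer_sizes = [100, 100, 50]     # hypothetical configuration
final_len = sum(s // 2 for s in cross_layer_sizes[:-1]) + cross_layer_sizes[-1]
print(final_len)                       # 150: the row count of w_nn_output in _build_CIN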
\ No newline at end of file
diff --git a/_modules/recommenders/models/fastai/fastai_utils.html b/_modules/recommenders/models/fastai/fastai_utils.html
new file mode 100644
index 0000000000..c2852dfc4f
--- /dev/null
+++ b/_modules/recommenders/models/fastai/fastai_utils.html
@@ -0,0 +1,472 @@
Source code for recommenders.models.fastai.fastai_utils

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+
+import numpy as np
+import pandas as pd
+import fastai
+import fastprogress
+from fastprogress.fastprogress import force_console_behavior
+
+from recommenders.utils import constants as cc
+
+
+
+def cartesian_product(*arrays):
+    """Helper function that computes the Cartesian product of the input arrays,
+    used by the fastai scoring utilities.
+
+    Args:
+        arrays (tuple of numpy.ndarray): Input arrays.
+
+    Returns:
+        numpy.ndarray: The Cartesian product, one combination per row.
+    """
+    la = len(arrays)
+    dtype = np.result_type(*arrays)
+    arr = np.empty([len(a) for a in arrays] + [la], dtype=dtype)
+    for i, a in enumerate(np.ix_(*arrays)):
+        arr[..., i] = a
+    return arr.reshape(-1, la)
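For reference, a quick check of `cartesian_product` on two toy arrays:

import numpy as np

print(cartesian_product(np.array([1, 2]), np.array([10, 20])))
# [[ 1 10]
#  [ 1 20]
#  [ 2 10]
#  [ 2 20]]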
+
+
+
+def score(
+    learner,
+    test_df,
+    user_col=cc.DEFAULT_USER_COL,
+    item_col=cc.DEFAULT_ITEM_COL,
+    prediction_col=cc.DEFAULT_PREDICTION_COL,
+    top_k=None,
+):
+    """Score all user/item pairs provided, and reduce the result to the top_k items
+    per user if top_k is not None.
+
+    Args:
+        learner (object): Model.
+        test_df (pandas.DataFrame): Test dataframe.
+        user_col (str): User column name.
+        item_col (str): Item column name.
+        prediction_col (str): Prediction column name.
+        top_k (int): Number of top items to recommend.
+
+    Returns:
+        pandas.DataFrame: Result of recommendation.
+    """
+    # replace values not known to the model with NaN
+    total_users, total_items = learner.data.train_ds.x.classes.values()
+    test_df.loc[~test_df[user_col].isin(total_users), user_col] = np.nan
+    test_df.loc[~test_df[item_col].isin(total_items), item_col] = np.nan
+
+    # map ids to embedding ids
+    u = learner.get_idx(test_df[user_col], is_item=False)
+    m = learner.get_idx(test_df[item_col], is_item=True)
+
+    # score the pytorch model
+    pred = learner.model.forward(u, m)
+    scores = pd.DataFrame(
+        {user_col: test_df[user_col], item_col: test_df[item_col], prediction_col: pred}
+    )
+    scores = scores.sort_values([user_col, prediction_col], ascending=[True, False])
+    if top_k is not None:
+        top_scores = scores.groupby(user_col).head(top_k).reset_index(drop=True)
+    else:
+        top_scores = scores
+    return top_scores
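A hedged usage sketch of `score`; `learner` is assumed to be a fastai v1 collaborative-filtering learner trained elsewhere, and `test_df` a dataframe holding the user/item pairs to score:

top_10 = score(
    learner,                      # assumed: a trained fastai collab learner
    test_df=test_df,              # assumed: dataframe of userID/itemID pairs
    user_col="userID",
    item_col="itemID",
    prediction_col="prediction",
    top_k=10,                     # keep only each user's 10 highest scores
)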
+
+
+
[docs]def hide_fastai_progress_bar(): + """Hide fastai progress bar""" + fastprogress.fastprogress.NO_BAR = True + fastprogress.fastprogress.WRITER_FN = str + master_bar, progress_bar = force_console_behavior() + fastai.basic_train.master_bar, fastai.basic_train.progress_bar = ( + master_bar, + progress_bar, + )
+
\ No newline at end of file
diff --git a/_modules/recommenders/models/geoimc/geoimc_data.html b/_modules/recommenders/models/geoimc/geoimc_data.html
new file mode 100644
index 0000000000..a146a09416
--- /dev/null
+++ b/_modules/recommenders/models/geoimc/geoimc_data.html
@@ -0,0 +1,659 @@
Source code for recommenders.models.geoimc.geoimc_data

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import logging
+import pandas as pd
+import numpy as np
+from scipy.sparse import coo_matrix, isspmatrix_csr
+from sklearn.model_selection import train_test_split
+from sklearn.preprocessing import normalize
+
+from recommenders.utils.python_utils import binarize
+from .geoimc_utils import length_normalize, reduce_dims
+
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger("geoimc")
+
+
+
[docs]class DataPtr: + """ + Holds data and its respective indices + """ + + def __init__(self, data, entities): + """Initialize a data pointer + + Args: + data (csr_matrix): The target data matrix. + entities (Iterator): An iterator (of 2 elements (ndarray)) containing + the features of row, col entities. + """ + assert isspmatrix_csr(data) + + self.data = data + self.entities = entities + self.data_indices = None + self.entity_indices = [None, None] + +
[docs] def get_data(self): + """ + Returns: + csr_matrix: Target matrix (based on the data_indices filter) + """ + if self.data_indices is None: + return self.data + return self.data[self.data_indices]
+
+
[docs] def get_entity(self, of="row"): + """Get entity + + Args: + of (str): The entity, either 'row' or 'col' + Returns: + numpy.ndarray: Entity matrix (based on the entity_indices filter) + """ + idx = 0 if of == "row" else 1 + if self.entity_indices[idx] is None: + return self.entities[idx] + return self.entities[idx][self.entity_indices[idx]]
+
+
+
+class Dataset:
+    """
+    Base class that holds the minimal information needed by the GeoIMC data loaders.
+    """
+
+    def __init__(self, name, features_dim=0, normalize=False, target_transform=""):
+        """Initialize parameters
+
+        Args:
+            name (str): Name of the dataset.
+            features_dim (uint): Dimension of the features. If not 0, PCA is performed
+                on the features as the dimensionality reduction technique.
+            normalize (bool): Normalize the features.
+            target_transform (str): Transform the target values. Current options are
+                'normalize' (normalize the values), '' (do nothing), 'binarize' (convert
+                the values using a threshold defined per dataset).
+        """
+        self.name = name
+        self.training_data = None
+        self.test_data = None
+        self.entities = None
+
+        self.features_dim = features_dim
+        self.feat_normalize = normalize
+        self.target_transform = target_transform
[docs] def normalize(self): + """Normalizes the entity features""" + if self.feat_normalize: + for i in range(len(self.entities)): + if isspmatrix_csr(self.entities[i]): + logger.info("Normalizing CSR matrix") + self.entities[i] = normalize(self.entities[i]) + else: + self.entities[i] = length_normalize(self.entities[i])
+
+
[docs] def generate_train_test_data(self, data, test_ratio=0.3): + """Generate train, test split. The split is performed on the row + entities. So, this essentially becomes a cold start row entity test. + + Args: + data (csr_matrix): The entire target matrix. + test_ratio (float): Ratio of test split. + + """ + self.training_data = DataPtr(data, self.entities) + self.test_data = DataPtr(data, self.entities) + + self.training_data.data_indices, self.test_data.data_indices = train_test_split( + np.array(range(0, data.shape[0])), + test_size=test_ratio, + shuffle=True, + random_state=0, + ) + self.training_data.entity_indices[0] = self.training_data.data_indices + self.test_data.entity_indices[0] = self.test_data.data_indices
+
+
[docs] def reduce_dims(self): + """Reduces the dimensionality of entity features.""" + if self.features_dim != 0: + self.entities[0] = reduce_dims(self.entities[0], self.features_dim) + self.entities[1] = reduce_dims(self.entities[1], self.features_dim) + logger.info("Dimensionality reduced ...")
+
+
+
[docs]class ML_100K(Dataset): + """ + Handles MovieLens-100K + """ + + def __init__(self, **kwargs): + super().__init__(self.__class__.__name__, **kwargs) + self.min_rating = 1 + self.max_rating = 5 + +
[docs] def df2coo(self, df): + """Convert the input dataframe into a coo matrix + + Args: + df (pandas.DataFrame): DataFrame containing the target matrix information. + """ + data = [] + row = list(df["user id"] - 1) + col = list(df["item id"] - 1) + for idx in range(0, len(df)): + val = df["rating"].iloc[idx] + data += [val] + + if self.target_transform == "normalize": + data = data / np.sqrt( + np.sum(np.arange(self.min_rating, self.max_rating + 1) ** 2) + ) + elif self.target_transform == "binarize": + data = binarize(np.array(data), 3) + + # TODO: Get this from `u.info` + return coo_matrix((data, (row, col)), shape=(943, 1682))
+
+    def _read_from_file(self, path):
+        """Read the target matrix from the file at `path`.
+
+        Args:
+            path (str): Path to the target matrix.
+        """
+        df = pd.read_csv(
+            path,
+            delimiter="\t",
+            names=["user id", "item id", "rating", "timestamp"],
+            encoding="ISO-8859-1",
+        )
+        df.drop(["timestamp"], axis=1, inplace=True)
+        return self.df2coo(df)
+
+    def load_data(self, path):
+        """Load the dataset.
+
+        Args:
+            path (str): Path to the directory containing the ML100K dataset; the row
+                (user) features are read from `u.user` and the column (movie) features
+                from `u.item`.
+        """
+        self.entities = [
+            self._load_user_features(f"{path}/u.user"),
+            self._load_item_features(f"{path}/u.item"),
+        ]
+        self.normalize()
+        self.reduce_dims()
+        self.training_data = DataPtr(
+            self._read_from_file(f"{path}/u1.base").tocsr(), self.entities
+        )
+        self.test_data = DataPtr(
+            self._read_from_file(f"{path}/u1.test").tocsr(), self.entities
+        )
+ + def _load_user_features(self, path): + """Load user features + + Args: + path (str): Path to the file containing user features information + + """ + data = pd.read_csv( + path, + delimiter="|", + names=["user_id", "age", "gender", "occupation", "zip_code"], + ) + features_df = pd.concat( + [ + data["user_id"], + pd.get_dummies(data["user_id"]), + pd.get_dummies(data["age"]), + pd.get_dummies(data["gender"]), + pd.get_dummies(data["occupation"]), + pd.get_dummies(data["zip_code"]), + ], + axis=1, + ) + features_df.drop(["user_id"], axis=1, inplace=True) + user_features = np.nan_to_num(features_df.to_numpy()) + return user_features + + def _load_item_features(self, path): + """Load item features + + Args: + path (str): Path to the file containing item features information + + """ + header = [ + "movie_id", + "movie_title", + "release_date", + "video_release_date", + "IMDb_URL", + "unknown", + "Action", + "Adventure", + "Animation", + "Childrens", + "Comedy", + "Crime", + "Documentary", + "Drama", + "Fantasy", + "Film-Noir", + "Horror", + "Musical", + "Mystery", + "Romance", + "Sci-Fi", + "Thriller", + "War", + "Western", + ] + data = pd.read_csv(path, delimiter="|", names=header, encoding="ISO-8859-1") + + features_df = pd.concat( + [ + pd.get_dummies(data["movie_title"]), + pd.get_dummies(data["release_date"]), + pd.get_dummies("video_release_date"), + pd.get_dummies("IMDb_URL"), + data[header[5:]], + ], + axis=1, + ) + item_features = np.nan_to_num(features_df.to_numpy()) + return item_features
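A hedged end-to-end sketch of this loader; the directory path is hypothetical and assumes the standard MovieLens-100K layout (`u.user`, `u.item`, `u1.base`, `u1.test`):

ml = ML_100K(features_dim=50, normalize=True, target_transform="binarize")
ml.load_data("./ml-100k")                       # hypothetical local path
train_csr = ml.training_data.get_data()         # csr_matrix of binarized ratings
user_feats = ml.training_data.get_entity("row") # filtered user features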
\ No newline at end of file
diff --git a/_modules/recommenders/models/geoimc/geoimc_predict.html b/_modules/recommenders/models/geoimc/geoimc_predict.html
new file mode 100644
index 0000000000..e26e0319b1
--- /dev/null
+++ b/_modules/recommenders/models/geoimc/geoimc_predict.html
@@ -0,0 +1,491 @@
Source code for recommenders.models.geoimc.geoimc_predict

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import numpy as np
+from scipy.linalg import sqrtm
+
+from recommenders.utils.python_utils import binarize as conv_binary
+
+
+
[docs]class PlainScalarProduct(object): + """ + Module that implements plain scalar product + as the retrieval criterion + """ + + def __init__(self, X, Y, **kwargs): + """ + Args: + X: numpy matrix of shape (users, features) + Y: numpy matrix of shape (items, features) + """ + self.X = X + self.Y = Y + +
[docs] def sim(self, **kwargs): + """Calculate the similarity score""" + sim = self.X.dot(self.Y.T) + return sim
+
+
+
+class Inferer:
+    """
+    Holds the minimal information needed for inference.
+    """
+
+    def __init__(self, method="dot", k=10, transformation=""):
+        """Initialize parameters
+
+        Args:
+            method (str): The inference method. Currently only 'dot'
+                (dot product) is supported.
+            k (uint): `k` for the 'topk' transformation.
+            transformation (str): Transform the inferred values into a
+                different scale. Currently 'mean' (binarize the values
+                using the mean of the inferred matrix as the threshold), 'topk'
+                (pick the top-k inferred values per row and assign them 1,
+                setting the rest to 0), and '' (no transformation) are
+                supported.
+        """
+        self.method = self._get_method(method)
+        self.k = k
+        self.transformation = transformation
+
+    def _get_method(self, k):
+        """Get the inference method.
+
+        Args:
+            k (str): The name of the inference method.
+
+        Returns:
+            class: A class object implementing the inferer `k`.
+        """
+        if k == "dot":
+            method = PlainScalarProduct
+        else:
+            raise ValueError(f"{k} is unknown.")
+        return method
+
[docs] def infer(self, dataPtr, W, **kwargs): + """Main inference method + + Args: + dataPtr (DataPtr): An object containing the X, Z features needed for inference + W (iterable): An iterable containing the U, B, V parametrized matrices. + """ + + if isinstance(dataPtr, list): + a = dataPtr[0] + b = dataPtr[1] + else: + a = dataPtr.get_entity("row").dot(W[0]).dot(sqrtm(W[1])) + b = dataPtr.get_entity("col").dot(W[2]).dot(sqrtm(W[1])) + + sim_score = self.method(a, b).sim(**kwargs) + + if self.transformation == "mean": + prediction = conv_binary(sim_score, sim_score.mean()) + elif self.transformation == "topk": + masked_sim_score = sim_score.copy() + + for i in range(sim_score.shape[0]): + topKidx = np.argpartition(masked_sim_score[i], -self.k)[-self.k :] + mask = np.ones(sim_score[i].size, dtype=bool) + mask[topKidx] = False + + masked_sim_score[i][topKidx] = 1 + masked_sim_score[i][mask] = 0 + prediction = masked_sim_score + else: + prediction = sim_score + + return prediction
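A minimal usage sketch (not from the library source) of `Inferer` on random factor matrices; the shapes and the list-input form below are illustrative assumptions.

import numpy as np

user_factors = np.random.rand(5, 4)  # 5 users, 4 latent features
item_factors = np.random.rand(7, 4)  # 7 items in the same latent space

inferer = Inferer(method="dot", k=3, transformation="topk")
# Passing a list of (row, col) features skips the DataPtr/W projection branch.
prediction = inferer.infer([user_factors, item_factors])
print(prediction.shape)  # (5, 7); each row contains exactly three 1s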
diff --git a/_modules/recommenders/models/geoimc/geoimc_utils.html b/_modules/recommenders/models/geoimc/geoimc_utils.html
new file mode 100644
index 0000000000..b2d4df150f
--- /dev/null
+++ b/_modules/recommenders/models/geoimc/geoimc_utils.html
@@ -0,0 +1,431 @@
Source code for recommenders.models.geoimc.geoimc_utils

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import numpy as np
+from sklearn.decomposition import PCA
+
+
+
+def length_normalize(matrix):
+    """Length normalize the matrix
+
+    Args:
+        matrix (np.ndarray): Input matrix that needs to be normalized
+
+    Returns:
+        np.ndarray: Normalized matrix
+    """
+    norms = np.sqrt(np.sum(matrix**2, axis=1))
+    norms[norms == 0] = 1
+    return matrix / norms[:, np.newaxis]
+
+
+def mean_center(matrix):
+    """Mean center the matrix across axis 0, in place
+
+    Args:
+        matrix (np.ndarray): Input matrix that needs to be mean centered
+    """
+    avg = np.mean(matrix, axis=0)
+    matrix -= avg
+
+
+def reduce_dims(matrix, target_dim):
+    """Reduce the dimensionality of the data using PCA.
+
+    Args:
+        matrix (np.ndarray): Matrix of shape (n_samples, n_features)
+        target_dim (uint): Dimension to which n_features should be reduced.
+
+    Returns:
+        np.ndarray: Matrix with n_features reduced to target_dim
+    """
+    model = PCA(n_components=target_dim)
+    model.fit(matrix)
+    return model.transform(matrix)
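A quick sanity check (illustrative only, not part of the module) of the three helpers on a random matrix.

import numpy as np

X = np.random.rand(100, 10)
mean_center(X)                # modifies X in place; columns now have ~0 mean
X_unit = length_normalize(X)  # rows now have unit L2 norm
print(np.allclose(np.linalg.norm(X_unit, axis=1), 1.0))  # True
X_small = reduce_dims(X, target_dim=3)
print(X_small.shape)          # (100, 3)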
diff --git a/_modules/recommenders/models/lightfm/lightfm_utils.html b/_modules/recommenders/models/lightfm/lightfm_utils.html
new file mode 100644
index 0000000000..5e0a031232
--- /dev/null
+++ b/_modules/recommenders/models/lightfm/lightfm_utils.html
@@ -0,0 +1,654 @@
Source code for recommenders.models.lightfm.lightfm_utils

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import pandas as pd
+import numpy as np
+import seaborn as sns
+
+from lightfm.evaluation import precision_at_k, recall_at_k
+
+
+
+def model_perf_plots(df):
+    """Plot model performance metrics.
+
+    Args:
+        df (pandas.DataFrame): Dataframe in tidy format, with ['epoch','level','value'] columns
+
+    Returns:
+        object: seaborn FacetGrid containing the plots
+    """
+    g = sns.FacetGrid(df, col="metric", hue="stage", col_wrap=2, sharey=False)
+    g = g.map(sns.scatterplot, "epoch", "value").add_legend()
+    return g
+
+
+def compare_metric(df_list, metric="prec", stage="test"):
+    """Combine and prepare a list of dataframes into tidy format.
+
+    Args:
+        df_list (list): List of dataframes
+        metric (str): Name of the metric to be extracted, optional
+        stage (str): Name of the model fitting stage to be extracted, optional
+
+    Returns:
+        pandas.DataFrame: Metrics
+    """
+    colnames = ["model" + str(x) for x in range(1, len(df_list) + 1)]
+    models = [
+        df[(df["stage"] == stage) & (df["metric"] == metric)]["value"]
+        .reset_index(drop=True)
+        .values
+        for df in df_list
+    ]
+
+    output = pd.DataFrame(zip(*models), columns=colnames).stack().reset_index()
+    output.columns = ["epoch", "data", "value"]
+    return output
+
+
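An illustrative call (not from the library source), assuming two tidy metric dataframes shaped like the output of `track_model_metrics` below.

import pandas as pd

df1 = pd.DataFrame(
    {"epoch": [0, 1], "value": [0.10, 0.12],
     "stage": ["test", "test"], "metric": ["prec", "prec"]}
)
df2 = df1.assign(value=[0.11, 0.14])
tidy = compare_metric([df1, df2], metric="prec", stage="test")
print(tidy)  # columns: epoch, data, value; 'data' holds 'model1'/'model2'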
+def track_model_metrics(
+    model,
+    train_interactions,
+    test_interactions,
+    k=10,
+    no_epochs=100,
+    no_threads=8,
+    show_plot=True,
+    **kwargs
+):
+    """Record a model's performance at each epoch, format the performance into a tidy format,
+    plot the performance and output the performance data.
+
+    Args:
+        model (LightFM instance): Fitted LightFM model
+        train_interactions (scipy sparse COO matrix): Train interactions set
+        test_interactions (scipy sparse COO matrix): Test interaction set
+        k (int): Number of recommendations, optional
+        no_epochs (int): Number of epochs to run, optional
+        no_threads (int): Number of parallel threads to use, optional
+        **kwargs: Other keyword arguments to be passed down
+
+    Returns:
+        pandas.DataFrame, LightFM model, matplotlib axes:
+        - Performance traces of the fitted model
+        - Fitted model
+        - Plot of the performance traces (side effect, when show_plot is True)
+    """
+    # initialise temporary data storage
+    model_prec_train = [0] * no_epochs
+    model_prec_test = [0] * no_epochs
+
+    model_rec_train = [0] * no_epochs
+    model_rec_test = [0] * no_epochs
+
+    # fit model and store train/test metrics at each epoch
+    for epoch in range(no_epochs):
+        model.fit_partial(
+            interactions=train_interactions, epochs=1, num_threads=no_threads, **kwargs
+        )
+        model_prec_train[epoch] = precision_at_k(
+            model, train_interactions, k=k, **kwargs
+        ).mean()
+        model_prec_test[epoch] = precision_at_k(
+            model, test_interactions, k=k, **kwargs
+        ).mean()
+
+        model_rec_train[epoch] = recall_at_k(
+            model, train_interactions, k=k, **kwargs
+        ).mean()
+        model_rec_test[epoch] = recall_at_k(
+            model, test_interactions, k=k, **kwargs
+        ).mean()
+
+    # collect the performance metrics into a dataframe
+    fitting_metrics = pd.DataFrame(
+        zip(model_prec_train, model_prec_test, model_rec_train, model_rec_test),
+        columns=[
+            "model_prec_train",
+            "model_prec_test",
+            "model_rec_train",
+            "model_rec_test",
+        ],
+    )
+    # convert into tidy format
+    fitting_metrics = fitting_metrics.stack().reset_index()
+    fitting_metrics.columns = ["epoch", "level", "value"]
+    # extract the labels for each observation
+    fitting_metrics["stage"] = fitting_metrics.level.str.split("_").str[-1]
+    fitting_metrics["metric"] = fitting_metrics.level.str.split("_").str[1]
+    fitting_metrics.drop(["level"], axis=1, inplace=True)
+    # replace the metric keys to improve visualisation
+    metric_keys = {"prec": "Precision", "rec": "Recall"}
+    fitting_metrics.metric.replace(metric_keys, inplace=True)
+    # plot the performance data
+    if show_plot:
+        model_perf_plots(fitting_metrics)
+    return fitting_metrics, model
+
+
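A hedged usage sketch: fitting a small LightFM model on synthetic interaction matrices and tracking precision/recall per epoch. The random matrices and hyper-parameters are assumptions for illustration only.

import numpy as np
from lightfm import LightFM
from scipy.sparse import coo_matrix

rng = np.random.default_rng(42)
train = coo_matrix((rng.random((20, 10)) < 0.3).astype(np.float32))
test = coo_matrix((rng.random((20, 10)) < 0.1).astype(np.float32))

model = LightFM(loss="warp", random_state=42)
metrics_df, model = track_model_metrics(
    model, train, test, k=5, no_epochs=3, no_threads=1, show_plot=False
)
print(metrics_df.head())  # tidy columns: epoch, value, stage, metric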
+def similar_users(user_id, user_features, model, N=10):
+    """Return the top N similar users, based on
+    https://github.com/lyst/lightfm/issues/244#issuecomment-355305681
+
+    Args:
+        user_id (int): Id of the user to be used as reference
+        user_features (scipy sparse CSR matrix): User feature matrix
+        model (LightFM instance): Fitted LightFM model
+        N (int): Number of top similar users to return
+
+    Returns:
+        pandas.DataFrame: Top N most similar users with score
+    """
+    _, user_representations = model.get_user_representations(features=user_features)
+
+    # Cosine similarity
+    scores = user_representations.dot(user_representations[user_id, :])
+    user_norms = np.linalg.norm(user_representations, axis=1)
+    user_norms[user_norms == 0] = 1e-10
+    scores /= user_norms
+
+    best = np.argpartition(scores, -(N + 1))[-(N + 1) :]
+    return pd.DataFrame(
+        sorted(zip(best, scores[best] / user_norms[user_id]), key=lambda x: -x[1])[1:],
+        columns=["userID", "score"],
+    )
+
+
+def similar_items(item_id, item_features, model, N=10):
+    """Return the top N similar items, based on
+    https://github.com/lyst/lightfm/issues/244#issuecomment-355305681
+
+    Args:
+        item_id (int): Id of the item to be used as reference
+        item_features (scipy sparse CSR matrix): Item feature matrix
+        model (LightFM instance): Fitted LightFM model
+        N (int): Number of top similar items to return
+
+    Returns:
+        pandas.DataFrame: Top N most similar items with score
+    """
+    _, item_representations = model.get_item_representations(features=item_features)
+
+    # Cosine similarity
+    scores = item_representations.dot(item_representations[item_id, :])
+    item_norms = np.linalg.norm(item_representations, axis=1)
+    item_norms[item_norms == 0] = 1e-10
+    scores /= item_norms
+
+    best = np.argpartition(scores, -(N + 1))[-(N + 1) :]
+    return pd.DataFrame(
+        sorted(zip(best, scores[best] / item_norms[item_id]), key=lambda x: -x[1])[1:],
+        columns=["itemID", "score"],
+    )
+
+
+def prepare_test_df(test_idx, uids, iids, uid_map, iid_map, weights):
+    """Prepare a test dataframe for evaluation.
+
+    Args:
+        test_idx (slice): Slice of test indices
+        uids (numpy.ndarray): Array of internal user indices
+        iids (numpy.ndarray): Array of internal item indices
+        uid_map (dict): Keys to map internal user indices to external ids.
+        iid_map (dict): Keys to map internal item indices to external ids.
+        weights (numpy.float32 coo_matrix): User-item interaction weights
+
+    Returns:
+        pandas.DataFrame: User-item pairs selected for testing
+    """
+    test_df = pd.DataFrame(
+        zip(
+            uids[test_idx],
+            iids[test_idx],
+            [list(uid_map.keys())[x] for x in uids[test_idx]],
+            [list(iid_map.keys())[x] for x in iids[test_idx]],
+        ),
+        columns=["uid", "iid", "userID", "itemID"],
+    )
+
+    dok_weights = weights.todok()
+    test_df["rating"] = test_df.apply(lambda x: dok_weights[x.uid, x.iid], axis=1)
+
+    return test_df[["userID", "itemID", "rating"]]
+
+
+def prepare_all_predictions(
+    data,
+    uid_map,
+    iid_map,
+    interactions,
+    model,
+    num_threads,
+    user_features=None,
+    item_features=None,
+):
+    """Prepare all predictions for evaluation.
+
+    Args:
+        data (pandas.DataFrame): Dataframe of all users, items and ratings as loaded
+        uid_map (dict): Keys to map internal user indices to external ids.
+        iid_map (dict): Keys to map internal item indices to external ids.
+        interactions (np.float32 coo_matrix): User-item interactions
+        model (LightFM instance): Fitted LightFM model
+        num_threads (int): Number of parallel computation threads
+        user_features (np.float32 csr_matrix): User weights over features
+        item_features (np.float32 csr_matrix): Item weights over features
+
+    Returns:
+        pandas.DataFrame: All predictions
+    """
+    users, items = [], []
+    item = list(data.itemID.unique())
+    for user in data.userID.unique():
+        users.extend([user] * len(item))
+        items.extend(item)
+    all_predictions = pd.DataFrame(data={"userID": users, "itemID": items})
+    all_predictions["uid"] = all_predictions.userID.map(uid_map)
+    all_predictions["iid"] = all_predictions.itemID.map(iid_map)
+
+    dok_weights = interactions.todok()
+    all_predictions["rating"] = all_predictions.apply(
+        lambda x: dok_weights[x.uid, x.iid], axis=1
+    )
+
+    # keep only the user-item pairs that are unseen in the training interactions
+    all_predictions = all_predictions[all_predictions.rating < 1].reset_index(drop=True)
+    all_predictions = all_predictions.drop("rating", axis=1)
+
+    all_predictions["prediction"] = all_predictions.apply(
+        lambda x: model.predict(
+            user_ids=np.array([x["uid"]], dtype=np.int32),
+            item_ids=np.array([x["iid"]], dtype=np.int32),
+            user_features=user_features,
+            item_features=item_features,
+            num_threads=num_threads,
+        )[0],
+        axis=1,
+    )
+
+    return all_predictions[["userID", "itemID", "prediction"]]
diff --git a/_modules/recommenders/models/lightgbm/lightgbm_utils.html b/_modules/recommenders/models/lightgbm/lightgbm_utils.html
new file mode 100644
index 0000000000..e65eb7f302
--- /dev/null
+++ b/_modules/recommenders/models/lightgbm/lightgbm_utils.html
@@ -0,0 +1,596 @@
Source code for recommenders.models.lightgbm.lightgbm_utils

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import logging
+import numpy as np
+import category_encoders as ce
+from tqdm import tqdm
+import collections
+import gc
+
+
+
+def unpackbits(x, num_bits):
+    """Convert a decimal numpy.ndarray into a multi-binary-value numpy.ndarray,
+    least-significant bit first (e.g. [1,2] -> [[1,0],[0,1]] for num_bits=2).
+
+    Args:
+        x (numpy.ndarray): Decimal array.
+        num_bits (int): The max length of the converted binary value.
+
+    Returns:
+        numpy.ndarray: Array of shape x.shape + (num_bits,) holding the bits.
+    """
+    xshape = list(x.shape)
+    x = x.reshape([-1, 1])
+    to_and = 2 ** np.arange(num_bits).reshape([1, num_bits])
+    return (x & to_and).astype(bool).astype(int).reshape(xshape + [num_bits])
+
+
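A quick check (illustrative, not part of the module) of `unpackbits`; note that the least-significant bit comes first.

import numpy as np

x = np.array([1, 2, 5])
print(unpackbits(x, 3))
# [[1 0 0]
#  [0 1 0]
#  [1 0 1]]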
+class NumEncoder(object):
+    """Encode all the categorical features into numerical ones by sequential label encoding,
+    sequential count encoding, and binary encoding. Additionally, it filters low-frequency
+    categories and fills missing values.
+    """
+
+    def __init__(self, cate_cols, nume_cols, label_col, threshold=10, thresrate=0.99):
+        """Constructor.
+
+        Args:
+            cate_cols (list): The columns of categorical features.
+            nume_cols (list): The columns of numerical features.
+            label_col (object): The column of the label.
+            threshold (int): Categories whose frequency is lower than the threshold will be
+                filtered (treated as "<LESS>").
+            thresrate (float): The (1.0 - thresrate, default 1%) lowest-frequency categories
+                will also be filtered.
+        """
+        logging.basicConfig(level=logging.INFO, format="%(asctime)s [INFO] %(message)s")
+        self.label_name = label_col
+        self.cate_cols = cate_cols
+        self.dtype_dict = {}
+        for item in cate_cols:
+            self.dtype_dict[item] = "str"
+        for item in nume_cols:
+            self.dtype_dict[item] = "float"
+        self.nume_cols = nume_cols
+        self.tgt_nume_cols = []
+        self.encoder = ce.ordinal.OrdinalEncoder(cols=cate_cols)
+        self.threshold = threshold
+        self.thresrate = thresrate
+
+        self.save_cate_avgs = {}
+        self.save_value_filter = {}
+        self.save_num_embs = {}
+        self.Max_len = {}
+        self.samples = 0
+
+    def fit_transform(self, df):
+        """Input a training set (pandas.DataFrame) and return the two converted
+        numpy.ndarrays (x, y).
+
+        Args:
+            df (pandas.DataFrame): Input dataframe
+
+        Returns:
+            numpy.ndarray, numpy.ndarray: New features and labels.
+        """
+        df = df.astype(dtype=self.dtype_dict)
+        self.samples = df.shape[0]
+        logging.info("Filtering and fillna features")
+        for item in tqdm(self.cate_cols):
+            value_counts = df[item].value_counts()
+            num = value_counts.shape[0]
+            self.save_value_filter[item] = list(
+                value_counts[: int(num * self.thresrate)][
+                    value_counts > self.threshold
+                ].index
+            )
+            rm_values = set(value_counts.index) - set(self.save_value_filter[item])
+            df[item] = df[item].map(lambda x: "<LESS>" if x in rm_values else x)
+            df[item] = df[item].fillna("<UNK>")
+            del value_counts
+            gc.collect()
+
+        for item in tqdm(self.nume_cols):
+            df[item] = df[item].fillna(df[item].mean())
+            self.save_num_embs[item] = {"sum": df[item].sum(), "cnt": df[item].shape[0]}
+
+        logging.info("Ordinal encoding cate features")
+        # ordinal_encoding
+        df = self.encoder.fit_transform(df)
+
+        logging.info("Target encoding cate features")
+        # dynamic_targeting_encoding
+        for item in tqdm(self.cate_cols):
+            feats = df[item].values
+            labels = df[self.label_name].values
+            feat_encoding = {"mean": [], "count": []}
+            self.save_cate_avgs[item] = collections.defaultdict(lambda: [0, 0])
+            for idx in range(self.samples):
+                cur_feat = feats[idx]
+                if cur_feat in self.save_cate_avgs[item]:
+                    feat_encoding["mean"].append(
+                        self.save_cate_avgs[item][cur_feat][0]
+                        / self.save_cate_avgs[item][cur_feat][1]
+                    )
+                    feat_encoding["count"].append(
+                        self.save_cate_avgs[item][cur_feat][1] / idx
+                    )
+                else:
+                    feat_encoding["mean"].append(0)
+                    feat_encoding["count"].append(0)
+                self.save_cate_avgs[item][cur_feat][0] += labels[idx]
+                self.save_cate_avgs[item][cur_feat][1] += 1
+            df[item + "_t_mean"] = feat_encoding["mean"]
+            df[item + "_t_count"] = feat_encoding["count"]
+            self.tgt_nume_cols.append(item + "_t_mean")
+            self.tgt_nume_cols.append(item + "_t_count")
+
+        logging.info("Start manual binary encoding")
+        rows = None
+        for item in tqdm(self.nume_cols + self.tgt_nume_cols):
+            feats = df[item].values
+            if rows is None:
+                rows = feats.reshape((-1, 1))
+            else:
+                rows = np.concatenate([rows, feats.reshape((-1, 1))], axis=1)
+            del feats
+            gc.collect()
+        for item in tqdm(self.cate_cols):
+            feats = df[item].values
+            Max = df[item].max()
+            bit_len = len(bin(Max)) - 2
+            samples = self.samples
+            self.Max_len[item] = bit_len
+            res = unpackbits(feats, bit_len).reshape((samples, -1))
+            rows = np.concatenate([rows, res], axis=1)
+            del feats
+            gc.collect()
+        trn_y = np.array(df[self.label_name].values).reshape((-1, 1))
+        del df
+        gc.collect()
+        trn_x = np.array(rows)
+        return trn_x, trn_y
+
+    # for test dataset
+    def transform(self, df):
+        """Input a testing / validation set (pandas.DataFrame) and return the two
+        converted numpy.ndarrays (x, y).
+
+        Args:
+            df (pandas.DataFrame): Input dataframe
+
+        Returns:
+            numpy.ndarray, numpy.ndarray: New features and labels.
+        """
+        df = df.astype(dtype=self.dtype_dict)
+        samples = df.shape[0]
+        logging.info("Filtering and fillna features")
+        for item in tqdm(self.cate_cols):
+            value_counts = df[item].value_counts()
+            rm_values = set(value_counts.index) - set(self.save_value_filter[item])
+            df[item] = df[item].map(lambda x: "<LESS>" if x in rm_values else x)
+            df[item] = df[item].fillna("<UNK>")
+
+        for item in tqdm(self.nume_cols):
+            mean = self.save_num_embs[item]["sum"] / self.save_num_embs[item]["cnt"]
+            df[item] = df[item].fillna(mean)
+
+        logging.info("Ordinal encoding cate features")
+        # ordinal_encoding
+        df = self.encoder.transform(df)
+
+        logging.info("Target encoding cate features")
+        # dynamic_targeting_encoding
+        for item in tqdm(self.cate_cols):
+            avgs = self.save_cate_avgs[item]
+            df[item + "_t_mean"] = df[item].map(
+                lambda x: avgs[x][0] / avgs[x][1] if x in avgs else 0
+            )
+            df[item + "_t_count"] = df[item].map(
+                lambda x: avgs[x][1] / self.samples if x in avgs else 0
+            )
+
+        logging.info("Start manual binary encoding")
+        rows = None
+        for item in tqdm(self.nume_cols + self.tgt_nume_cols):
+            feats = df[item].values
+            if rows is None:
+                rows = feats.reshape((-1, 1))
+            else:
+                rows = np.concatenate([rows, feats.reshape((-1, 1))], axis=1)
+            del feats
+            gc.collect()
+        for item in tqdm(self.cate_cols):
+            feats = df[item].values
+            bit_len = self.Max_len[item]
+            res = unpackbits(feats, bit_len).reshape((samples, -1))
+            rows = np.concatenate([rows, res], axis=1)
+            del feats
+            gc.collect()
+        vld_y = np.array(df[self.label_name].values).reshape((-1, 1))
+        del df
+        gc.collect()
+        vld_x = np.array(rows)
+        return vld_x, vld_y
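A hedged sketch of `NumEncoder` on a toy dataframe; the column names and the tiny `threshold` are assumptions chosen so that almost no category is filtered out.

import pandas as pd

df = pd.DataFrame(
    {
        "cat": ["a", "b", "a", "c", "a", "b"],
        "num": [1.0, 2.0, None, 4.0, 5.0, 6.0],
        "label": [0, 1, 0, 1, 1, 0],
    }
)
enc = NumEncoder(cate_cols=["cat"], nume_cols=["num"], label_col="label", threshold=0)
train_x, train_y = enc.fit_transform(df.iloc[:4].copy())
valid_x, valid_y = enc.transform(df.iloc[4:].copy())
print(train_x.shape, valid_x.shape)  # e.g. (4, 5) (2, 5)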
diff --git a/_modules/recommenders/models/ncf/dataset.html b/_modules/recommenders/models/ncf/dataset.html
new file mode 100644
index 0000000000..43140f7fe9
--- /dev/null
+++ b/_modules/recommenders/models/ncf/dataset.html
@@ -0,0 +1,962 @@
Source code for recommenders.models.ncf.dataset

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import os
+from collections import OrderedDict
+import random
+import numpy as np
+import pandas as pd
+import csv
+import logging
+from tqdm import tqdm
+
+from recommenders.utils.constants import (
+    DEFAULT_ITEM_COL,
+    DEFAULT_USER_COL,
+    DEFAULT_RATING_COL,
+)
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+
+
+class EmptyFileException(Exception):
+    """Exception raised if file is empty"""
+
+
+class MissingFieldsException(Exception):
+    """Exception raised if file is missing expected fields"""
+
+
+class FileNotSortedException(Exception):
+    """Exception raised if file is not sorted correctly"""
+
+
+class MissingUserException(Exception):
+    """Exception raised if user is not in file"""
+
+
+class DataFile:
+    """
+    DataFile class for NCF. Iterator to read data from a CSV file.
+    Data must be sorted by user. Includes utilities for loading user data from
+    file, formatting it and returning a Pandas dataframe.
+    """
+
+    def __init__(
+        self, filename, col_user, col_item, col_rating, col_test_batch=None, binary=True
+    ):
+        """Constructor
+
+        Args:
+            filename (str): Path to file to be processed.
+            col_user (str): User column name.
+            col_item (str): Item column name.
+            col_rating (str): Rating column name.
+            col_test_batch (str): Test batch column name.
+            binary (bool): If true, set rating > 0 to rating = 1.
+        """
+        self.filename = filename
+        self.col_user = col_user
+        self.col_item = col_item
+        self.col_rating = col_rating
+        self.col_test_batch = col_test_batch
+        self.expected_fields = [self.col_user, self.col_item, self.col_rating]
+        if self.col_test_batch is not None:
+            self.expected_fields.append(self.col_test_batch)
+        self.binary = binary
+        self._init_data()
+        self.id2user = {self.user2id[k]: k for k in self.user2id}
+        self.id2item = {self.item2id[k]: k for k in self.item2id}
+
+    @property
+    def users(self):
+        return self.user2id.keys()
+
+    @property
+    def items(self):
+        return self.item2id.keys()
+
+    @property
+    def end_of_file(self):
+        return (self.line_num > 0) and self.next_row is None
+
+    def __iter__(self):
+        return self
+
+    def __enter__(self, *args):
+        self.file = open(self.filename, "r", encoding="UTF8")
+        self.reader = csv.DictReader(self.file)
+        self._check_for_missing_fields(self.expected_fields)
+        self.line_num = 0
+        self.row, self.next_row = None, None
+        return self
+
+    def __exit__(self, *args):
+        self.file.close()
+        self.reader = None
+        self.line_num = 0
+        self.row, self.next_row = None, None
+
+    def __next__(self):
+        if self.next_row:
+            self.row = self.next_row
+        elif self.line_num == 0:
+            self.row = self._extract_row_data(next(self.reader, None))
+            if self.row is None:
+                raise EmptyFileException("{} is empty.".format(self.filename))
+        else:
+            raise StopIteration  # end of file
+        self.next_row = self._extract_row_data(next(self.reader, None))
+        self.line_num += 1
+
+        return self.row
+
+    def _check_for_missing_fields(self, fields_to_check):
+        missing_fields = set(fields_to_check).difference(set(self.reader.fieldnames))
+        if len(missing_fields):
+            raise MissingFieldsException(
+                "Columns {} not in header of file {}".format(
+                    missing_fields, self.filename
+                )
+            )
+
+    def _extract_row_data(self, row):
+        if row is None:
+            return row
+        user = int(row[self.col_user])
+        item = int(row[self.col_item])
+        rating = float(row[self.col_rating])
+        if self.binary:
+            rating = float(rating > 0)
+        test_batch = None
+        if self.col_test_batch:
+            test_batch = int(row[self.col_test_batch])
+        return {
+            self.col_user: user,
+            self.col_item: item,
+            self.col_rating: rating,
+            self.col_test_batch: test_batch,
+        }
+
+    def _init_data(self):
+        # Compile lists of unique users and items, assign IDs to users and items,
+        # and ensure file is sorted by user (and batch index if test set)
+        logger.info("Indexing {} ...".format(self.filename))
+        with self:
+            user_items = []
+            self.item2id, self.user2id = OrderedDict(), OrderedDict()
+            batch_index = 0
+            for _ in self:
+                item = self.row[self.col_item]
+                user = self.row[self.col_user]
+                test_batch = self.row[self.col_test_batch]
+                if not self.end_of_file:
+                    next_user = self.next_row[self.col_user]
+                    next_test_batch = self.next_row[self.col_test_batch]
+                if item not in self.items:
+                    self.item2id[item] = len(self.item2id)
+
+                user_items.append(item)
+
+                if (next_user != user) or self.next_row is None:
+                    if not self.end_of_file:
+                        if next_user in self.users:
+                            raise FileNotSortedException(
+                                "File {} is not sorted by user".format(self.filename)
+                            )
+                    self.user2id[user] = len(self.user2id)
+                if self.col_test_batch:
+                    if (next_test_batch != test_batch) or self.next_row is None:
+                        if not self.end_of_file:
+                            if next_test_batch < batch_index:
+                                raise FileNotSortedException(
+                                    "File {} is not sorted by {}".format(
+                                        self.filename, self.col_test_batch
+                                    )
+                                )
+                        batch_index += 1
+            self.batch_indices_range = range(0, batch_index)
+            self.data_len = self.line_num
+
+    def load_data(self, key, by_user=True):
+        """Load data for a specified user or test batch
+
+        Args:
+            key (int): User or test batch index
+            by_user (bool): Load data by user if True, else by test batch
+
+        Returns:
+            pandas.DataFrame
+        """
+        records = []
+        key_col = self.col_user if by_user else self.col_test_batch
+
+        # fast forward in file to user/test batch
+        while (self.line_num == 0) or (self.row[key_col] != key):
+            if self.end_of_file:
+                raise MissingUserException(
+                    "User {} not in file {}".format(key, self.filename)
+                )
+            next(self)
+        # collect user/test batch data
+        while self.row[key_col] == key:
+            row = self.row
+            if self.col_test_batch in row:
+                del row[self.col_test_batch]
+            records.append(row)
+            if not self.end_of_file:
+                next(self)
+            else:
+                break
+        return pd.DataFrame.from_records(records)
+
+
+class NegativeSampler:
+    """NegativeSampler class for NCF. Samples a subset of negative items from a given population of items."""
+
+    def __init__(
+        self,
+        user,
+        n_samples,
+        user_positive_item_pool,
+        item_pool,
+        sample_with_replacement,
+        print_warnings=True,
+        training=True,
+    ):
+        """Constructor
+
+        Args:
+            user (str or int): User to be sampled for.
+            n_samples (int): Number of required samples.
+            user_positive_item_pool (set): Set of items with which user has previously interacted.
+            item_pool (set): Set of all items in population.
+            sample_with_replacement (bool): If true, sample negative examples with replacement,
+                otherwise without replacement.
+            print_warnings (bool): If true, prints warnings if sampling without replacement and
+                there are not enough items to sample from to satisfy n_neg or n_neg_test.
+            training (bool): Set to true if sampling for the training set or false if for the test set.
+        """
+        self.user = user
+        self.n_samples = n_samples
+        self.user_positive_item_pool = user_positive_item_pool
+        self.item_pool = item_pool
+        self.sample_with_replacement = sample_with_replacement
+        self.print_warnings = print_warnings
+        self.training = training
+
+        self.user_negative_item_pool = self._get_user_negatives_pool()
+        self.population_size = len(self.user_negative_item_pool)
+        self._sample = (
+            self._sample_negatives_with_replacement
+            if self.sample_with_replacement
+            else self._sample_negatives_without_replacement
+        )
+        if not self.sample_with_replacement:
+            self._check_sample_size()
+
+    def sample(self):
+        """Sample uniformly from a population of negative items
+
+        Returns:
+            list
+        """
+        return self._sample()
+
+    def _get_user_negatives_pool(self):
+        # get list of items user has not interacted with
+        return list(set(self.item_pool) - self.user_positive_item_pool)
+
+    def _sample_negatives_with_replacement(self):
+        return random.choices(self.user_negative_item_pool, k=self.n_samples)
+
+    def _sample_negatives_without_replacement(self):
+        return random.sample(self.user_negative_item_pool, k=self.n_samples)
+
+    def _check_sample_size(self):
+        # if sampling without replacement, check sample population is sufficient and reduce
+        # n_samples if not.
+        n_neg_var = "n_neg" if self.training else "n_neg_test"
+        dataset_name = "training" if self.training else "test"
+
+        k = min(self.n_samples, self.population_size)
+        if k < self.n_samples and self.print_warnings:
+            warning_string = (
+                "The population of negative items to sample from is too small for user {}. "
+                "Samples needed = {}, negative items = {}. "
+                "Reducing samples to {} for this user. "
+                "If an equal number of negative samples for each user is required in the {} set, sample with replacement or reduce {}. "
+                "This warning can be turned off by setting print_warnings=False".format(
+                    self.user,
+                    self.n_samples,
+                    self.population_size,
+                    self.population_size,
+                    dataset_name,
+                    n_neg_var,
+                )
+            )
+            logging.warning(warning_string)
+        self.n_samples = k
+
+
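An illustrative sketch (not from the module): sampling three negatives for one user from a ten-item catalogue.

sampler = NegativeSampler(
    user=1,
    n_samples=3,
    user_positive_item_pool={2, 5},
    item_pool=set(range(10)),
    sample_with_replacement=False,
)
print(sampler.sample())  # e.g. [7, 0, 9]; items 2 and 5 never appear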
+class Dataset(object):
+    """Dataset class for NCF"""
+
+    def __init__(
+        self,
+        train_file,
+        test_file=None,
+        test_file_full=None,
+        overwrite_test_file_full=False,
+        n_neg=4,
+        n_neg_test=100,
+        col_user=DEFAULT_USER_COL,
+        col_item=DEFAULT_ITEM_COL,
+        col_rating=DEFAULT_RATING_COL,
+        binary=True,
+        seed=None,
+        sample_with_replacement=False,
+        print_warnings=False,
+    ):
+        """Constructor
+
+        Args:
+            train_file (str): Path to training dataset file.
+            test_file (str): Path to test dataset file for leave-one-out evaluation.
+            test_file_full (str): Path to full test dataset file including negative samples.
+            overwrite_test_file_full (bool): If true, recreate and overwrite test_file_full.
+            n_neg (int): Number of negative samples per positive example for training set.
+            n_neg_test (int): Number of negative samples per positive example for test set.
+            col_user (str): User column name.
+            col_item (str): Item column name.
+            col_rating (str): Rating column name.
+            binary (bool): If true, set rating > 0 to rating = 1.
+            seed (int): Seed.
+            sample_with_replacement (bool): If true, sample negative examples with replacement,
+                otherwise without replacement.
+            print_warnings (bool): If true, prints warnings if sampling without replacement and
+                there are not enough items to sample from to satisfy n_neg or n_neg_test.
+        """
+        self.train_file = train_file
+        self.test_file = test_file
+        self.test_file_full = test_file_full
+        self.overwrite_test_file_full = overwrite_test_file_full
+        self.n_neg = n_neg
+        self.n_neg_test = n_neg_test
+        self.col_user = col_user
+        self.col_item = col_item
+        self.col_rating = col_rating
+        self.binary = binary
+        self.sample_with_replacement = sample_with_replacement
+        self.print_warnings = print_warnings
+
+        self.col_test_batch = "test_batch"
+
+        self.train_datafile = DataFile(
+            filename=self.train_file,
+            col_user=self.col_user,
+            col_item=self.col_item,
+            col_rating=self.col_rating,
+            binary=self.binary,
+        )
+
+        self.n_users = len(self.train_datafile.users)
+        self.n_items = len(self.train_datafile.items)
+        self.user2id = self.train_datafile.user2id
+        self.item2id = self.train_datafile.item2id
+        self.id2user = self.train_datafile.id2user
+        self.id2item = self.train_datafile.id2item
+        self.train_len = self.train_datafile.data_len
+
+        if self.test_file is not None:
+            self.test_datafile = DataFile(
+                filename=self.test_file,
+                col_user=self.col_user,
+                col_item=self.col_item,
+                col_rating=self.col_rating,
+                binary=self.binary,
+            )
+            if self.test_file_full is None:
+                self.test_file_full = os.path.splitext(self.test_file)[0] + "_full.csv"
+            if self.overwrite_test_file_full or not os.path.isfile(self.test_file_full):
+                self._create_test_file()
+            self.test_full_datafile = DataFile(
+                filename=self.test_file_full,
+                col_user=self.col_user,
+                col_item=self.col_item,
+                col_rating=self.col_rating,
+                col_test_batch=self.col_test_batch,
+                binary=self.binary,
+            )
+        # set random seed
+        random.seed(seed)
+
+    def _create_negative_examples_df(self, user, user_negative_samples):
+        # create dataframe containing negative examples for user, assigned zero rating
+        n_samples = len(user_negative_samples)
+        return pd.DataFrame(
+            {
+                self.col_user: [user] * n_samples,
+                self.col_item: user_negative_samples,
+                self.col_rating: [0.0] * n_samples,
+            }
+        )
+
+    def _create_test_file(self):
+        logger.info(
+            "Creating full leave-one-out test file {} ...".format(self.test_file_full)
+        )
+
+        # create empty csv
+        pd.DataFrame(
+            columns=[self.col_user, self.col_item, self.col_rating, self.col_test_batch]
+        ).to_csv(self.test_file_full, index=False)
+
+        batch_idx = 0
+
+        with self.train_datafile as train_datafile:
+            with self.test_datafile as test_datafile:
+                for user in tqdm(test_datafile.users):
+                    if user in train_datafile.users:
+                        user_test_data = test_datafile.load_data(user)
+                        user_train_data = train_datafile.load_data(user)
+                        # for leave-one-out evaluation, exclude items seen in both training and test sets
+                        # when sampling negatives
+                        user_positive_item_pool = set(
+                            user_test_data[self.col_item].unique()
+                        ).union(user_train_data[self.col_item].unique())
+                        sampler = NegativeSampler(
+                            user,
+                            self.n_neg_test,
+                            user_positive_item_pool,
+                            self.train_datafile.items,
+                            self.sample_with_replacement,
+                            self.print_warnings,
+                            training=False,
+                        )
+
+                        user_examples_dfs = []
+                        # sample n_neg_test negatives for each positive example and assign a batch index
+                        for positive_example in np.array_split(
+                            user_test_data, user_test_data.shape[0]
+                        ):
+                            negative_examples = self._create_negative_examples_df(
+                                user, sampler.sample()
+                            )
+                            examples = pd.concat([positive_example, negative_examples])
+                            examples[self.col_test_batch] = batch_idx
+                            user_examples_dfs.append(examples)
+                            batch_idx += 1
+                        # append user test data to file
+                        user_examples = pd.concat(user_examples_dfs)
+                        user_examples.to_csv(
+                            self.test_file_full, mode="a", index=False, header=False
+                        )
+
+    def _split_into_batches(self, shuffle_buffer, batch_size):
+        for i in range(0, len(shuffle_buffer), batch_size):
+            yield shuffle_buffer[i : i + batch_size]
+
+    def _prepare_batch_with_id(self, batch):
+        return [
+            [self.user2id[user] for user in batch[self.col_user].values],
+            [self.item2id[item] for item in batch[self.col_item].values],
+            batch[self.col_rating].values.tolist(),
+        ]
+
+    def _prepare_batch_without_id(self, batch):
+        return [
+            batch[self.col_user].values.tolist(),
+            batch[self.col_item].values.tolist(),
+            batch[self.col_rating].values.tolist(),
+        ]
+
+    def _release_shuffle_buffer(
+        self, shuffle_buffer, batch_size, yield_id, write_to=None
+    ):
+        prepare_batch = (
+            self._prepare_batch_with_id if yield_id else self._prepare_batch_without_id
+        )
+        shuffle_buffer_df = pd.concat(shuffle_buffer)
+        shuffle_buffer_df = shuffle_buffer_df.sample(
+            shuffle_buffer_df.shape[0]
+        )  # shuffle the buffer
+        for batch in self._split_into_batches(shuffle_buffer_df, batch_size):
+            if batch.shape[0] == batch_size:
+                if write_to:
+                    batch.to_csv(write_to, mode="a", header=False, index=False)
+                yield prepare_batch(batch)
+            else:
+                return batch
+
+    def train_loader(
+        self, batch_size, shuffle_size=None, yield_id=False, write_to=None
+    ):
+        """
+        Generator for serving batches of training data. Positive examples are loaded from the
+        original training file, to which negative samples are added. Data is loaded in memory into a
+        shuffle buffer up to a maximum of shuffle_size rows, before the data is shuffled and released.
+        If out-of-memory errors are encountered, try reducing shuffle_size.
+
+        Args:
+            batch_size (int): Number of examples in each batch.
+            shuffle_size (int): Maximum number of examples in shuffle buffer.
+            yield_id (bool): If true, return assigned user and item IDs, else return original values.
+            write_to (str): Path of file to write full dataset (including negative examples).
+
+        Returns:
+            list
+        """
+        # if shuffle_size not supplied, use (estimated) full data size, i.e. a complete in-memory shuffle
+        if shuffle_size is None:
+            shuffle_size = self.train_len * (self.n_neg + 1)
+        if write_to:
+            pd.DataFrame(
+                columns=[self.col_user, self.col_item, self.col_rating]
+            ).to_csv(write_to, header=True, index=False)
+        shuffle_buffer = []
+
+        with self.train_datafile as train_datafile:
+            for user in train_datafile.users:
+                user_positive_examples = train_datafile.load_data(user)
+                user_positive_item_pool = set(
+                    user_positive_examples[self.col_item].unique()
+                )
+                n_samples = self.n_neg * user_positive_examples.shape[0]
+                sampler = NegativeSampler(
+                    user,
+                    n_samples,
+                    user_positive_item_pool,
+                    self.train_datafile.items,
+                    self.sample_with_replacement,
+                    self.print_warnings,
+                )
+                user_negative_examples = self._create_negative_examples_df(
+                    user, sampler.sample()
+                )
+                user_examples = pd.concat(
+                    [user_positive_examples, user_negative_examples]
+                )
+                shuffle_buffer.append(user_examples)
+                shuffle_buffer_len = sum([df.shape[0] for df in shuffle_buffer])
+                if shuffle_buffer_len >= shuffle_size:
+                    buffer_remainder = yield from self._release_shuffle_buffer(
+                        shuffle_buffer, batch_size, yield_id, write_to
+                    )
+                    shuffle_buffer = (
+                        [buffer_remainder] if buffer_remainder is not None else []
+                    )
+            # yield remaining buffer
+            yield from self._release_shuffle_buffer(
+                shuffle_buffer, batch_size, yield_id, write_to
+            )
+
+    def test_loader(self, yield_id=False):
+        """Generator for serving batches of test data for leave-one-out evaluation. Data is
+        loaded from test_file_full.
+
+        Args:
+            yield_id (bool): If true, return assigned user and item IDs, else return original values.
+
+        Returns:
+            list
+        """
+        prepare_batch = (
+            self._prepare_batch_with_id if yield_id else self._prepare_batch_without_id
+        )
+
+        with self.test_full_datafile as test_full_datafile:
+            for test_batch_idx in test_full_datafile.batch_indices_range:
+                test_batch_data = test_full_datafile.load_data(
+                    test_batch_idx, by_user=False
+                )
+                yield prepare_batch(test_batch_data)
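A hedged end-to-end sketch, assuming ./train.csv and ./test.csv exist with userID, itemID and rating columns and are sorted by user, as the DataFile contract above requires.

data = Dataset(
    train_file="./train.csv",
    test_file="./test.csv",
    n_neg=4,
    n_neg_test=100,
    seed=42,
)
for users, items, labels in data.train_loader(batch_size=256, yield_id=True):
    pass  # feed each batch to a model; NCF.fit consumes this loader internally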
diff --git a/_modules/recommenders/models/ncf/ncf_singlenode.html b/_modules/recommenders/models/ncf/ncf_singlenode.html
new file mode 100644
index 0000000000..7832f001bf
--- /dev/null
+++ b/_modules/recommenders/models/ncf/ncf_singlenode.html
@@ -0,0 +1,839 @@
Source code for recommenders.models.ncf.ncf_singlenode

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import os
+import numpy as np
+import tensorflow as tf
+import tf_slim as slim
+from time import time
+import logging
+
+
+tf.compat.v1.disable_eager_execution()
+logger = logging.getLogger(__name__)
+MODEL_CHECKPOINT = "model.ckpt"
+
+
+
+class NCF:
+    """Neural Collaborative Filtering (NCF) implementation
+
+    :Citation:
+
+        He, Xiangnan, Lizi Liao, Hanwang Zhang, Liqiang Nie, Xia Hu, and Tat-Seng Chua. "Neural collaborative filtering."
+        In Proceedings of the 26th International Conference on World Wide Web, pp. 173-182. International World Wide Web
+        Conferences Steering Committee, 2017. Link: https://www.comp.nus.edu.sg/~xiangnan/papers/ncf.pdf
+    """
+
+    def __init__(
+        self,
+        n_users,
+        n_items,
+        model_type="NeuMF",
+        n_factors=8,
+        layer_sizes=[16, 8, 4],
+        n_epochs=50,
+        batch_size=64,
+        learning_rate=5e-3,
+        verbose=1,
+        seed=None,
+    ):
+        """Constructor
+
+        Args:
+            n_users (int): Number of users in the dataset.
+            n_items (int): Number of items in the dataset.
+            model_type (str): Model type.
+            n_factors (int): Dimension of latent space.
+            layer_sizes (list): Number of layers for MLP.
+            n_epochs (int): Number of epochs for training.
+            batch_size (int): Batch size.
+            learning_rate (float): Learning rate.
+            verbose (int): Whether to show the training output or not.
+            seed (int): Seed.
+        """
+        # seed
+        tf.compat.v1.set_random_seed(seed)
+        np.random.seed(seed)
+        self.seed = seed
+
+        self.n_users = n_users
+        self.n_items = n_items
+        self.model_type = model_type.lower()
+        self.n_factors = n_factors
+        self.layer_sizes = layer_sizes
+        self.n_epochs = n_epochs
+        self.verbose = verbose
+        self.batch_size = batch_size
+        self.learning_rate = learning_rate
+
+        # check model type
+        model_options = ["gmf", "mlp", "neumf"]
+        if self.model_type not in model_options:
+            raise ValueError(
+                "Wrong model type, please select one of the following: {}".format(
+                    model_options
+                )
+            )
+
+        # ncf layer input size
+        self.ncf_layer_size = n_factors + layer_sizes[-1]
+        # create ncf model
+        self._create_model()
+        # set GPU use with demand growth
+        gpu_options = tf.compat.v1.GPUOptions(allow_growth=True)
+        # set TF Session
+        self.sess = tf.compat.v1.Session(
+            config=tf.compat.v1.ConfigProto(gpu_options=gpu_options)
+        )
+        # parameters initialization
+        self.sess.run(tf.compat.v1.global_variables_initializer())
+
+    def _create_model(self):
+        # reset graph
+        tf.compat.v1.reset_default_graph()
+
+        with tf.compat.v1.variable_scope("input_data", reuse=tf.compat.v1.AUTO_REUSE):
+            # input: index of users, items and ground truth
+            self.user_input = tf.compat.v1.placeholder(tf.int32, shape=[None, 1])
+            self.item_input = tf.compat.v1.placeholder(tf.int32, shape=[None, 1])
+            self.labels = tf.compat.v1.placeholder(tf.float32, shape=[None, 1])
+
+        with tf.compat.v1.variable_scope("embedding", reuse=tf.compat.v1.AUTO_REUSE):
+            # set embedding table
+            self.embedding_gmf_P = tf.Variable(
+                tf.random.truncated_normal(
+                    shape=[self.n_users, self.n_factors],
+                    mean=0.0,
+                    stddev=0.01,
+                    seed=self.seed,
+                ),
+                name="embedding_gmf_P",
+                dtype=tf.float32,
+            )
+
+            self.embedding_gmf_Q = tf.Variable(
+                tf.random.truncated_normal(
+                    shape=[self.n_items, self.n_factors],
+                    mean=0.0,
+                    stddev=0.01,
+                    seed=self.seed,
+                ),
+                name="embedding_gmf_Q",
+                dtype=tf.float32,
+            )
+
+            # set embedding table
+            self.embedding_mlp_P = tf.Variable(
+                tf.random.truncated_normal(
+                    shape=[self.n_users, int(self.layer_sizes[0] / 2)],
+                    mean=0.0,
+                    stddev=0.01,
+                    seed=self.seed,
+                ),
+                name="embedding_mlp_P",
+                dtype=tf.float32,
+            )
+
+            self.embedding_mlp_Q = tf.Variable(
+                tf.random.truncated_normal(
+                    shape=[self.n_items, int(self.layer_sizes[0] / 2)],
+                    mean=0.0,
+                    stddev=0.01,
+                    seed=self.seed,
+                ),
+                name="embedding_mlp_Q",
+                dtype=tf.float32,
+            )
+
+        with tf.compat.v1.variable_scope("gmf", reuse=tf.compat.v1.AUTO_REUSE):
+            # get user embedding p and item embedding q
+            self.gmf_p = tf.reduce_sum(
+                input_tensor=tf.nn.embedding_lookup(
+                    params=self.embedding_gmf_P, ids=self.user_input
+                ),
+                axis=1,
+            )
+            self.gmf_q = tf.reduce_sum(
+                input_tensor=tf.nn.embedding_lookup(
+                    params=self.embedding_gmf_Q, ids=self.item_input
+                ),
+                axis=1,
+            )
+
+            # get gmf vector
+            self.gmf_vector = self.gmf_p * self.gmf_q
+
+        with tf.compat.v1.variable_scope("mlp", reuse=tf.compat.v1.AUTO_REUSE):
+            # get user embedding p and item embedding q
+            self.mlp_p = tf.reduce_sum(
+                input_tensor=tf.nn.embedding_lookup(
+                    params=self.embedding_mlp_P, ids=self.user_input
+                ),
+                axis=1,
+            )
+            self.mlp_q = tf.reduce_sum(
+                input_tensor=tf.nn.embedding_lookup(
+                    params=self.embedding_mlp_Q, ids=self.item_input
+                ),
+                axis=1,
+            )
+
+            # concatenate user and item vector
+            output = tf.concat([self.mlp_p, self.mlp_q], 1)
+
+            # MLP layers
+            for layer_size in self.layer_sizes[1:]:
+                output = slim.layers.fully_connected(
+                    output,
+                    num_outputs=layer_size,
+                    activation_fn=tf.nn.relu,
+                    weights_initializer=tf.compat.v1.keras.initializers.VarianceScaling(
+                        scale=1.0,
+                        mode="fan_avg",
+                        distribution="uniform",
+                        seed=self.seed,
+                    ),
+                )
+            self.mlp_vector = output
+
+        with tf.compat.v1.variable_scope("ncf", reuse=tf.compat.v1.AUTO_REUSE):
+            if self.model_type == "gmf":
+                # GMF only
+                output = slim.layers.fully_connected(
+                    self.gmf_vector,
+                    num_outputs=1,
+                    activation_fn=None,
+                    biases_initializer=None,
+                    weights_initializer=tf.compat.v1.keras.initializers.VarianceScaling(
+                        scale=1.0,
+                        mode="fan_avg",
+                        distribution="uniform",
+                        seed=self.seed,
+                    ),
+                )
+                self.output = tf.sigmoid(output)
+
+            elif self.model_type == "mlp":
+                # MLP only
+                output = slim.layers.fully_connected(
+                    self.mlp_vector,
+                    num_outputs=1,
+                    activation_fn=None,
+                    biases_initializer=None,
+                    weights_initializer=tf.compat.v1.keras.initializers.VarianceScaling(
+                        scale=1.0,
+                        mode="fan_avg",
+                        distribution="uniform",
+                        seed=self.seed,
+                    ),
+                )
+                self.output = tf.sigmoid(output)
+
+            elif self.model_type == "neumf":
+                # concatenate GMF and MLP vector
+                self.ncf_vector = tf.concat([self.gmf_vector, self.mlp_vector], 1)
+                # get predicted rating score
+                output = slim.layers.fully_connected(
+                    self.ncf_vector,
+                    num_outputs=1,
+                    activation_fn=None,
+                    biases_initializer=None,
+                    weights_initializer=tf.compat.v1.keras.initializers.VarianceScaling(
+                        scale=1.0,
+                        mode="fan_avg",
+                        distribution="uniform",
+                        seed=self.seed,
+                    ),
+                )
+                self.output = tf.sigmoid(output)
+
+        with tf.compat.v1.variable_scope("loss", reuse=tf.compat.v1.AUTO_REUSE):
+            # set loss function
+            self.loss = tf.compat.v1.losses.log_loss(self.labels, self.output)
+
+        with tf.compat.v1.variable_scope("optimizer", reuse=tf.compat.v1.AUTO_REUSE):
+            # set optimizer
+            self.optimizer = tf.compat.v1.train.AdamOptimizer(
+                learning_rate=self.learning_rate
+            ).minimize(self.loss)
+
+    def save(self, dir_name):
+        """Save model parameters in `dir_name`
+
+        Args:
+            dir_name (str): Directory name, which should be a folder name rather than a file name;
+                a new directory will be created if it does not exist.
+        """
+        # save trained model
+        if not os.path.exists(dir_name):
+            os.makedirs(dir_name)
+        saver = tf.compat.v1.train.Saver()
+        saver.save(self.sess, os.path.join(dir_name, MODEL_CHECKPOINT))
+
+    def load(self, gmf_dir=None, mlp_dir=None, neumf_dir=None, alpha=0.5):
+        """Load model parameters for further use.
+
+        GMF model --> load parameters in `gmf_dir`
+
+        MLP model --> load parameters in `mlp_dir`
+
+        NeuMF model --> load parameters in `neumf_dir` or in `gmf_dir` and `mlp_dir`
+
+        Args:
+            gmf_dir (str): Directory name for GMF model.
+            mlp_dir (str): Directory name for MLP model.
+            neumf_dir (str): Directory name for NeuMF model.
+            alpha (float): Concatenation hyper-parameter for the GMF and MLP output layers.
+
+        Returns:
+            object: Load parameters in this model.
+        """
+        # load pre-trained model
+        if self.model_type == "gmf" and gmf_dir is not None:
+            saver = tf.compat.v1.train.Saver()
+            saver.restore(self.sess, os.path.join(gmf_dir, MODEL_CHECKPOINT))
+
+        elif self.model_type == "mlp" and mlp_dir is not None:
+            saver = tf.compat.v1.train.Saver()
+            saver.restore(self.sess, os.path.join(mlp_dir, MODEL_CHECKPOINT))
+
+        elif self.model_type == "neumf" and neumf_dir is not None:
+            saver = tf.compat.v1.train.Saver()
+            saver.restore(self.sess, os.path.join(neumf_dir, MODEL_CHECKPOINT))
+
+        elif self.model_type == "neumf" and gmf_dir is not None and mlp_dir is not None:
+            # load neumf using gmf and mlp
+            self._load_neumf(gmf_dir, mlp_dir, alpha)
+
+        else:
+            raise NotImplementedError
+
+    def _load_neumf(self, gmf_dir, mlp_dir, alpha):
+        """Load GMF and MLP model parameters for further use in NeuMF.
+        NeuMF model --> load parameters in `gmf_dir` and `mlp_dir`
+        """
+        # load gmf part
+        variables = tf.compat.v1.global_variables()
+        # get variables with 'gmf'
+        var_flow_restore = [
+            val for val in variables if "gmf" in val.name and "ncf" not in val.name
+        ]
+        # load 'gmf' variables
+        saver = tf.compat.v1.train.Saver(var_flow_restore)
+        # restore
+        saver.restore(self.sess, os.path.join(gmf_dir, MODEL_CHECKPOINT))
+
+        # load mlp part
+        variables = tf.compat.v1.global_variables()
+        # get variables with 'mlp'
+        var_flow_restore = [
+            val for val in variables if "mlp" in val.name and "ncf" not in val.name
+        ]
+        # load 'mlp' variables
+        saver = tf.compat.v1.train.Saver(var_flow_restore)
+        # restore
+        saver.restore(self.sess, os.path.join(mlp_dir, MODEL_CHECKPOINT))
+
+        # concat pretrained h_from_gmf and h_from_mlp
+        vars_list = tf.compat.v1.get_collection(
+            tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, scope="ncf"
+        )
+
+        assert len(vars_list) == 1
+        ncf_fc = vars_list[0]
+
+        # get weights from gmf and mlp
+        gmf_fc = tf.train.load_variable(gmf_dir, ncf_fc.name)
+        mlp_fc = tf.train.load_variable(mlp_dir, ncf_fc.name)
+
+        # load the fc layer by tf.concat
+        assign_op = tf.compat.v1.assign(
+            ncf_fc, tf.concat([alpha * gmf_fc, (1 - alpha) * mlp_fc], axis=0)
+        )
+        self.sess.run(assign_op)
+
+    def fit(self, data):
+        """Fit model with training data
+
+        Args:
+            data (NCFDataset): initialized Dataset in ./dataset.py
+        """
+        # get user and item mapping dicts
+        self.user2id = data.user2id
+        self.item2id = data.item2id
+        self.id2user = data.id2user
+        self.id2item = data.id2item
+
+        # loop for n_epochs
+        for epoch_count in range(1, self.n_epochs + 1):
+            # negative sampling for training
+            train_begin = time()
+
+            # initialize
+            train_loss = []
+
+            # calculate loss and update NCF parameters
+            for user_input, item_input, labels in data.train_loader(self.batch_size):
+                user_input = np.array([self.user2id[x] for x in user_input])
+                item_input = np.array([self.item2id[x] for x in item_input])
+                labels = np.array(labels)
+
+                feed_dict = {
+                    self.user_input: user_input[..., None],
+                    self.item_input: item_input[..., None],
+                    self.labels: labels[..., None],
+                }
+
+                # get loss and execute optimization
+                loss, _ = self.sess.run([self.loss, self.optimizer], feed_dict)
+                train_loss.append(loss)
+            train_time = time() - train_begin
+
+            # output every self.verbose epochs
+            if self.verbose and epoch_count % self.verbose == 0:
+                logger.info(
+                    "Epoch %d [%.2fs]: train_loss = %.6f "
+                    % (epoch_count, train_time, sum(train_loss) / len(train_loss))
+                )
+
+    def predict(self, user_input, item_input, is_list=False):
+        """Predict function of this trained model
+
+        Args:
+            user_input (list or element of list): userID or userID list
+            item_input (list or element of list): itemID or itemID list
+            is_list (bool): If true, the input is of list type; note that list-wise
+                prediction is faster than element-wise prediction.
+
+        Returns:
+            list or float: A list of predicted ratings or a predicted rating score.
+        """
+        if is_list:
+            output = self._predict(user_input, item_input)
+            return list(output.reshape(-1))
+
+        else:
+            output = self._predict(np.array([user_input]), np.array([item_input]))
+            return float(output.reshape(-1)[0])
+
+    def _predict(self, user_input, item_input):
+        # index conversion
+        user_input = np.array([self.user2id[x] for x in user_input])
+        item_input = np.array([self.item2id[x] for x in item_input])
+
+        # get feed dict
+        feed_dict = {
+            self.user_input: user_input[..., None],
+            self.item_input: item_input[..., None],
+        }
+
+        # calculate predicted score
+        return self.sess.run(self.output, feed_dict)
diff --git a/_modules/recommenders/models/newsrec/io/mind_all_iterator.html b/_modules/recommenders/models/newsrec/io/mind_all_iterator.html
new file mode 100644
index 0000000000..1541c6e02b
--- /dev/null
+++ b/_modules/recommenders/models/newsrec/io/mind_all_iterator.html
@@ -0,0 +1,991 @@
Source code for recommenders.models.newsrec.io.mind_all_iterator

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import tensorflow as tf
+import numpy as np
+import pickle
+
+from recommenders.models.deeprec.io.iterator import BaseIterator
+from recommenders.models.newsrec.newsrec_utils import word_tokenize, newsample
+
+__all__ = ["MINDAllIterator"]
+
+
+
+class MINDAllIterator(BaseIterator):
+    """Train data loader for the NAML model.
+    The model requires a special data format: each instance contains a label, an impression id, a user id,
+    the candidate news articles and the user's clicked news articles. Articles are represented by title words,
+    body words, verts and subverts.
+
+    Iterator will not load the whole data into memory. Instead, it loads data into memory
+    per mini-batch, so that large files can be used as input data.
+
+    Attributes:
+        col_spliter (str): Column splitter in one line.
+        ID_spliter (str): ID splitter in one line.
+        batch_size (int): Number of samples in one batch.
+        title_size (int): Maximum number of words in a news title.
+        body_size (int): Maximum number of words in a news body (the abstract is used in MIND).
+        his_size (int): Maximum number of clicked news articles in the user click history.
+        npratio (int): Negative and positive ratio used in negative sampling. -1 means no negative sampling.
+    """
+
+    def __init__(
+        self,
+        hparams,
+        npratio=-1,
+        col_spliter="\t",
+        ID_spliter="%",
+    ):
+        """Initialize an iterator. Create the necessary placeholders for the model.
+
+        Args:
+            hparams (object): Global hyper-parameters. Some key settings, such as head_num and head_dim, are there.
+            npratio (int): Negative and positive ratio used in negative sampling.
+            col_spliter (str): Column splitter in one line.
+            ID_spliter (str): ID splitter in one line.
+        """
+        self.col_spliter = col_spliter
+        self.ID_spliter = ID_spliter
+        self.batch_size = hparams.batch_size
+        self.title_size = hparams.title_size
+        self.body_size = hparams.body_size
+        self.his_size = hparams.his_size
+        self.npratio = npratio
+
+        self.word_dict = self.load_dict(hparams.wordDict_file)
+        self.vert_dict = self.load_dict(hparams.vertDict_file)
+        self.subvert_dict = self.load_dict(hparams.subvertDict_file)
+        self.uid2index = self.load_dict(hparams.userDict_file)
+
+    def load_dict(self, file_path):
+        """Load a pickled file
+
+        Args:
+            file_path (str): File path
+
+        Returns:
+            object: The unpickled object
+        """
+        with open(file_path, "rb") as f:
+            return pickle.load(f)
+
+    def init_news(self, news_file):
+        """Init news information given a news file, such as `news_title_index` and `news_ab_index`.
+
+        Args:
+            news_file: Path of the news file
+        """
+        self.nid2index = {}
+        news_title = [""]
+        news_ab = [""]
+        news_vert = [""]
+        news_subvert = [""]
+
+        with tf.io.gfile.GFile(news_file, "r") as rd:
+            for line in rd:
+                nid, vert, subvert, title, ab, url, _, _ = line.strip("\n").split(
+                    self.col_spliter
+                )
+
+                if nid in self.nid2index:
+                    continue
+
+                self.nid2index[nid] = len(self.nid2index) + 1
+                title = word_tokenize(title)
+                ab = word_tokenize(ab)
+                news_title.append(title)
+                news_ab.append(ab)
+                news_vert.append(vert)
+                news_subvert.append(subvert)
+
+        self.news_title_index = np.zeros(
+            (len(news_title), self.title_size), dtype="int32"
+        )
+
+        self.news_ab_index = np.zeros((len(news_ab), self.body_size), dtype="int32")
+        self.news_vert_index = np.zeros((len(news_vert), 1), dtype="int32")
+        self.news_subvert_index = np.zeros((len(news_subvert), 1), dtype="int32")
+
+        for news_index in range(len(news_title)):
+            title = news_title[news_index]
+            ab = news_ab[news_index]
+            vert = news_vert[news_index]
+            subvert = news_subvert[news_index]
+            for word_index in range(min(self.title_size, len(title))):
+                if title[word_index] in self.word_dict:
+                    self.news_title_index[news_index, word_index] = self.word_dict[
+                        title[word_index].lower()
+                    ]
+            for word_index_ab in range(min(self.body_size, len(ab))):
+                if ab[word_index_ab] in self.word_dict:
+                    self.news_ab_index[news_index, word_index_ab] = self.word_dict[
+                        ab[word_index_ab].lower()
+                    ]
+            if vert in self.vert_dict:
+                self.news_vert_index[news_index, 0] = self.vert_dict[vert]
+            if subvert in self.subvert_dict:
+                self.news_subvert_index[news_index, 0] = self.subvert_dict[subvert]
+
[docs] def init_behaviors(self, behaviors_file): + """Init behavior logs given behaviors file. + + Args: + behaviors_file (str): path of behaviors file + """ + self.histories = [] + self.imprs = [] + self.labels = [] + self.impr_indexes = [] + self.uindexes = [] + + with tf.io.gfile.GFile(behaviors_file, "r") as rd: + impr_index = 0 + for line in rd: + uid, time, history, impr = line.strip("\n").split(self.col_spliter)[-4:] + + history = [self.nid2index[i] for i in history.split()] + history = [0] * (self.his_size - len(history)) + history[ + : self.his_size + ] + + impr_news = [self.nid2index[i.split("-")[0]] for i in impr.split()] + label = [int(i.split("-")[1]) for i in impr.split()] + uindex = self.uid2index[uid] if uid in self.uid2index else 0 + + self.histories.append(history) + self.imprs.append(impr_news) + self.labels.append(label) + self.impr_indexes.append(impr_index) + self.uindexes.append(uindex) + impr_index += 1
+ +
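The one-line padding expression in init_behaviors above left-pads short click histories with zeros and truncates long ones, so every history has exactly his_size entries. A standalone sketch of the same idiom (the sample values are illustrative):

# Left-pad with zeros, then cap at his_size.
his_size = 5
history = [3, 7, 9]                                        # shorter than his_size
assert [0] * (his_size - len(history)) + history[:his_size] == [0, 0, 3, 7, 9]
history = [1, 2, 3, 4, 5, 6, 7]                            # longer than his_size
# [0] * (5 - 7) is an empty list, so only the truncation applies:
assert [0] * (his_size - len(history)) + history[:his_size] == [1, 2, 3, 4, 5]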
[docs] def parser_one_line(self, line): + """Parse one behavior sample into feature values. + If npratio is larger than 0, negative sampling is applied. + + Args: + line (int): sample index. + + Yields: + list: Parsed results including label, impression id, user id, + candidate_title_index, clicked_title_index, + candidate_ab_index, clicked_ab_index, + candidate_vert_index, clicked_vert_index, + candidate_subvert_index, clicked_subvert_index. + """ + if self.npratio > 0: + impr_label = self.labels[line] + impr = self.imprs[line] + + poss = [] + negs = [] + + for news, click in zip(impr, impr_label): + if click == 1: + poss.append(news) + else: + negs.append(news) + + for p in poss: + impr_index = [] + user_index = [] + label = [1] + [0] * self.npratio + + n = newsample(negs, self.npratio) + candidate_title_index = self.news_title_index[[p] + n] + candidate_ab_index = self.news_ab_index[[p] + n] + candidate_vert_index = self.news_vert_index[[p] + n] + candidate_subvert_index = self.news_subvert_index[[p] + n] + click_title_index = self.news_title_index[self.histories[line]] + click_ab_index = self.news_ab_index[self.histories[line]] + click_vert_index = self.news_vert_index[self.histories[line]] + click_subvert_index = self.news_subvert_index[self.histories[line]] + impr_index.append(self.impr_indexes[line]) + user_index.append(self.uindexes[line]) + + yield ( + label, + impr_index, + user_index, + candidate_title_index, + candidate_ab_index, + candidate_vert_index, + candidate_subvert_index, + click_title_index, + click_ab_index, + click_vert_index, + click_subvert_index, + ) + + else: + impr_label = self.labels[line] + impr = self.imprs[line] + + for news, label in zip(impr, impr_label): + impr_index = [] + user_index = [] + label = [label] + + candidate_title_index = self.news_title_index[news] + candidate_ab_index = self.news_ab_index[news] + candidate_vert_index = self.news_vert_index[news] + candidate_subvert_index = self.news_subvert_index[news] + click_title_index = self.news_title_index[self.histories[line]] + click_ab_index = self.news_ab_index[self.histories[line]] + click_vert_index = self.news_vert_index[self.histories[line]] + click_subvert_index = self.news_subvert_index[self.histories[line]] + impr_index.append(self.impr_indexes[line]) + user_index.append(self.uindexes[line]) + + yield ( + label, + impr_index, + user_index, + candidate_title_index, + candidate_ab_index, + candidate_vert_index, + candidate_subvert_index, + click_title_index, + click_ab_index, + click_vert_index, + click_subvert_index, + )
+ +
[docs] def load_data_from_file(self, news_file, behavior_file): + """Read and parse data from the news file and the behaviors file. + + Args: + news_file (str): A file that contains news information. + behavior_file (str): A file that contains information about user impressions. + + Yields: + object: An iterator that yields parsed results, in the format of dict. + """ + + if not hasattr(self, "news_title_index"): + self.init_news(news_file) + + if not hasattr(self, "impr_indexes"): + self.init_behaviors(behavior_file) + + label_list = [] + imp_indexes = [] + user_indexes = [] + candidate_title_indexes = [] + candidate_ab_indexes = [] + candidate_vert_indexes = [] + candidate_subvert_indexes = [] + click_title_indexes = [] + click_ab_indexes = [] + click_vert_indexes = [] + click_subvert_indexes = [] + cnt = 0 + + indexes = np.arange(len(self.labels)) + + if self.npratio > 0: + np.random.shuffle(indexes) + + for index in indexes: + for ( + label, + impr_index, + user_index, + candidate_title_index, + candidate_ab_index, + candidate_vert_index, + candidate_subvert_index, + click_title_index, + click_ab_index, + click_vert_index, + click_subvert_index, + ) in self.parser_one_line(index): + candidate_title_indexes.append(candidate_title_index) + candidate_ab_indexes.append(candidate_ab_index) + candidate_vert_indexes.append(candidate_vert_index) + candidate_subvert_indexes.append(candidate_subvert_index) + click_title_indexes.append(click_title_index) + click_ab_indexes.append(click_ab_index) + click_vert_indexes.append(click_vert_index) + click_subvert_indexes.append(click_subvert_index) + imp_indexes.append(impr_index) + user_indexes.append(user_index) + label_list.append(label) + + cnt += 1 + if cnt >= self.batch_size: + yield self._convert_data( + label_list, + imp_indexes, + user_indexes, + candidate_title_indexes, + candidate_ab_indexes, + candidate_vert_indexes, + candidate_subvert_indexes, + click_title_indexes, + click_ab_indexes, + click_vert_indexes, + click_subvert_indexes, + ) + label_list = [] + imp_indexes = [] + user_indexes = [] + candidate_title_indexes = [] + candidate_ab_indexes = [] + candidate_vert_indexes = [] + candidate_subvert_indexes = [] + click_title_indexes = [] + click_ab_indexes = [] + click_vert_indexes = [] + click_subvert_indexes = [] + cnt = 0 + + if cnt > 0: + yield self._convert_data( + label_list, + imp_indexes, + user_indexes, + candidate_title_indexes, + candidate_ab_indexes, + candidate_vert_indexes, + candidate_subvert_indexes, + click_title_indexes, + click_ab_indexes, + click_vert_indexes, + click_subvert_indexes, + )
+ + def _convert_data( + self, + label_list, + imp_indexes, + user_indexes, + candidate_title_indexes, + candidate_ab_indexes, + candidate_vert_indexes, + candidate_subvert_indexes, + click_title_indexes, + click_ab_indexes, + click_vert_indexes, + click_subvert_indexes, + ): + """Convert data into numpy arrays that are good for further model operation. + + Args: + label_list (list): a list of ground-truth labels. + imp_indexes (list): a list of impression indexes. + user_indexes (list): a list of user indexes. + candidate_title_indexes (list): the candidate news titles' words indices. + candidate_ab_indexes (list): the candidate news abstracts' words indices. + candidate_vert_indexes (list): the candidate news verts' indices. + candidate_subvert_indexes (list): the candidate news subverts' indices. + click_title_indexes (list): words indices for user's clicked news titles. + click_ab_indexes (list): words indices for user's clicked news abstracts. + click_vert_indexes (list): indices for user's clicked news verts. + click_subvert_indexes (list): indices for user's clicked news subverts. + + Returns: + dict: A dictionary, containing multiple numpy arrays that are convenient for further operation. + """ + + labels = np.asarray(label_list, dtype=np.float32) + imp_indexes = np.asarray(imp_indexes, dtype=np.int32) + user_indexes = np.asarray(user_indexes, dtype=np.int32) + candidate_title_index_batch = np.asarray( + candidate_title_indexes, dtype=np.int64 + ) + candidate_ab_index_batch = np.asarray(candidate_ab_indexes, dtype=np.int64) + candidate_vert_index_batch = np.asarray(candidate_vert_indexes, dtype=np.int64) + candidate_subvert_index_batch = np.asarray( + candidate_subvert_indexes, dtype=np.int64 + ) + click_title_index_batch = np.asarray(click_title_indexes, dtype=np.int64) + click_ab_index_batch = np.asarray(click_ab_indexes, dtype=np.int64) + click_vert_index_batch = np.asarray(click_vert_indexes, dtype=np.int64) + click_subvert_index_batch = np.asarray(click_subvert_indexes, dtype=np.int64) + return { + "impression_index_batch": imp_indexes, + "user_index_batch": user_indexes, + "clicked_title_batch": click_title_index_batch, + "clicked_ab_batch": click_ab_index_batch, + "clicked_vert_batch": click_vert_index_batch, + "clicked_subvert_batch": click_subvert_index_batch, + "candidate_title_batch": candidate_title_index_batch, + "candidate_ab_batch": candidate_ab_index_batch, + "candidate_vert_batch": candidate_vert_index_batch, + "candidate_subvert_batch": candidate_subvert_index_batch, + "labels": labels, + } + 
[docs] def load_user_from_file(self, news_file, behavior_file): + """Read and parse user data from the news file and the behaviors file. + + Args: + news_file (str): A file that contains news information. + behavior_file (str): A file that contains information about user impressions. + + Yields: + object: An iterator that yields parsed user features, in the format of dict. + """ + + if not hasattr(self, "news_title_index"): + self.init_news(news_file) + + if not hasattr(self, "impr_indexes"): + self.init_behaviors(behavior_file) + + user_indexes = [] + impr_indexes = [] + click_title_indexes = [] + click_ab_indexes = [] + click_vert_indexes = [] + click_subvert_indexes = [] + cnt = 0 + + for index in range(len(self.impr_indexes)): + click_title_indexes.append(self.news_title_index[self.histories[index]]) + click_ab_indexes.append(self.news_ab_index[self.histories[index]]) + click_vert_indexes.append(self.news_vert_index[self.histories[index]]) + click_subvert_indexes.append(self.news_subvert_index[self.histories[index]]) + user_indexes.append(self.uindexes[index]) + impr_indexes.append(self.impr_indexes[index]) + + cnt += 1 + if cnt >= self.batch_size: + yield self._convert_user_data( + user_indexes, + impr_indexes, + click_title_indexes, + click_ab_indexes, + click_vert_indexes, + click_subvert_indexes, + ) + user_indexes = [] + impr_indexes = [] + click_title_indexes = [] + click_ab_indexes = [] + click_vert_indexes = [] + click_subvert_indexes = [] + cnt = 0 + + if cnt > 0: + yield self._convert_user_data( + user_indexes, + impr_indexes, + click_title_indexes, + click_ab_indexes, + click_vert_indexes, + click_subvert_indexes, + )
+ + def _convert_user_data( + self, + user_indexes, + impr_indexes, + click_title_indexes, + click_ab_indexes, + click_vert_indexes, + click_subvert_indexes, + ): + """Convert data into numpy arrays that are good for further model operation. + + Args: + user_indexes (list): a list of user indexes. + impr_indexes (list): a list of impression indexes. + click_title_indexes (list): words indices for user's clicked news titles. + click_ab_indexes (list): words indices for user's clicked news abstracts. + click_vert_indexes (list): indices for user's clicked news verts. + click_subvert_indexes (list): indices for user's clicked news subverts. + + Returns: + dict: A dictionary, containing multiple numpy arrays that are convenient for further operation. + """ + + user_indexes = np.asarray(user_indexes, dtype=np.int32) + impr_indexes = np.asarray(impr_indexes, dtype=np.int32) + click_title_index_batch = np.asarray(click_title_indexes, dtype=np.int64) + click_ab_index_batch = np.asarray(click_ab_indexes, dtype=np.int64) + click_vert_index_batch = np.asarray(click_vert_indexes, dtype=np.int64) + click_subvert_index_batch = np.asarray(click_subvert_indexes, dtype=np.int64) + + return { + "user_index_batch": user_indexes, + "impr_index_batch": impr_indexes, + "clicked_title_batch": click_title_index_batch, + "clicked_ab_batch": click_ab_index_batch, + "clicked_vert_batch": click_vert_index_batch, + "clicked_subvert_batch": click_subvert_index_batch, + } + 
[docs] def load_news_from_file(self, news_file): + """Read and parse news data from the news file. + + Args: + news_file (str): A file that contains news information. + + Yields: + object: An iterator that yields parsed news features, in the format of dict. + """ + if not hasattr(self, "news_title_index"): + self.init_news(news_file) + + news_indexes = [] + candidate_title_indexes = [] + candidate_ab_indexes = [] + candidate_vert_indexes = [] + candidate_subvert_indexes = [] + cnt = 0 + + for index in range(len(self.news_title_index)): + news_indexes.append(index) + candidate_title_indexes.append(self.news_title_index[index]) + candidate_ab_indexes.append(self.news_ab_index[index]) + candidate_vert_indexes.append(self.news_vert_index[index]) + candidate_subvert_indexes.append(self.news_subvert_index[index]) + + cnt += 1 + if cnt >= self.batch_size: + yield self._convert_news_data( + news_indexes, + candidate_title_indexes, + candidate_ab_indexes, + candidate_vert_indexes, + candidate_subvert_indexes, + ) + news_indexes = [] + candidate_title_indexes = [] + candidate_ab_indexes = [] + candidate_vert_indexes = [] + candidate_subvert_indexes = [] + cnt = 0 + + if cnt > 0: + yield self._convert_news_data( + news_indexes, + candidate_title_indexes, + candidate_ab_indexes, + candidate_vert_indexes, + candidate_subvert_indexes, + )
+ + def _convert_news_data( + self, + news_indexes, + candidate_title_indexes, + candidate_ab_indexes, + candidate_vert_indexes, + candidate_subvert_indexes, + ): + """Convert data into numpy arrays that are good for further model operation. + + Args: + news_indexes (list): a list of news indexes. + candidate_title_indexes (list): the candidate news titles' words indices. + candidate_ab_indexes (list): the candidate news abstracts' words indices. + candidate_vert_indexes (list): the candidate news verts' indices. + candidate_subvert_indexes (list): the candidate news subverts' indices. + + Returns: + dict: A dictionary, containing multiple numpy arrays that are convenient for further operation. + """ + + news_indexes_batch = np.asarray(news_indexes, dtype=np.int32) + candidate_title_index_batch = np.asarray( + candidate_title_indexes, dtype=np.int32 + ) + candidate_ab_index_batch = np.asarray(candidate_ab_indexes, dtype=np.int32) + candidate_vert_index_batch = np.asarray(candidate_vert_indexes, dtype=np.int32) + candidate_subvert_index_batch = np.asarray( + candidate_subvert_indexes, dtype=np.int32 + ) + + return { + "news_index_batch": news_indexes_batch, + "candidate_title_batch": candidate_title_index_batch, + "candidate_ab_batch": candidate_ab_index_batch, + "candidate_vert_batch": candidate_vert_index_batch, + "candidate_subvert_batch": candidate_subvert_index_batch, + } + 
[docs] def load_impression_from_file(self, behaivors_file): + """Read and parse impression data from the behaviors file. + + Args: + behaivors_file (str): A file that contains information about user behaviors. + + Yields: + object: An iterator that yields parsed impression data, in the format of dict. + """ + + if not hasattr(self, "histories"): + self.init_behaviors(behaivors_file) + + indexes = np.arange(len(self.labels)) + + for index in indexes: + impr_label = np.array(self.labels[index], dtype="int32") + impr_news = np.array(self.imprs[index], dtype="int32") + + yield ( + self.impr_indexes[index], + impr_news, + self.uindexes[index], + impr_label, + )
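For orientation, each batch yielded by load_data_from_file is a dict of numpy arrays. Assuming batch_size=B, title_size=T, body_size=D, his_size=H and npratio=K (training mode), the shapes work out as sketched in the comment block below; this is a summary, not output of the code:

# labels                   (B, K + 1)      one positive plus K sampled negatives
# candidate_title_batch    (B, K + 1, T)
# candidate_ab_batch       (B, K + 1, D)
# candidate_vert_batch     (B, K + 1, 1)
# candidate_subvert_batch  (B, K + 1, 1)
# clicked_title_batch      (B, H, T)
# clicked_ab_batch         (B, H, D)
# clicked_vert_batch       (B, H, 1)
# clicked_subvert_batch    (B, H, 1)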
\ No newline at end of file
diff --git a/_modules/recommenders/models/newsrec/io/mind_iterator.html b/_modules/recommenders/models/newsrec/io/mind_iterator.html new file mode 100644 index 0000000000..d01e8e3593 --- /dev/null +++ b/_modules/recommenders/models/newsrec/io/mind_iterator.html @@ -0,0 +1,853 @@
+recommenders.models.newsrec.io.mind_iterator — Recommenders documentation
Source code for recommenders.models.newsrec.io.mind_iterator

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import tensorflow as tf
+import numpy as np
+import pickle
+
+from recommenders.models.deeprec.io.iterator import BaseIterator
+from recommenders.models.newsrec.newsrec_utils import word_tokenize, newsample
+
+__all__ = ["MINDIterator"]
+
+
+
[docs]class MINDIterator(BaseIterator): + """Train data loader for title-based news recommendation models such as NRMS, LSTUR and NPA. + The model requires a special data format: each instance contains a label, an impression id, a user id, + the candidate news articles and the user's clicked news articles. Articles are represented by their title words. + + The iterator does not load the whole data into memory. Instead, it loads data into memory + per mini-batch, so that large files can be used as input data. + + Attributes: + col_spliter (str): column separator within a line. + ID_spliter (str): ID separator within a line. + batch_size (int): the number of samples in one batch. + title_size (int): max number of words in a news title. + his_size (int): max number of clicked news in a user's click history. + npratio (int): negative to positive ratio used in negative sampling. -1 means negative sampling is not needed. + """ + + def __init__( + self, + hparams, + npratio=-1, + col_spliter="\t", + ID_spliter="%", + ): + """Initialize an iterator. Create necessary placeholders for the model. + + Args: + hparams (object): Global hyper-parameters. Some key settings such as head_num and head_dim are there. + npratio (int): negative to positive ratio used in negative sampling. -1 means negative sampling is not needed. + col_spliter (str): column separator within a line. + ID_spliter (str): ID separator within a line. + """ + self.col_spliter = col_spliter + self.ID_spliter = ID_spliter + self.batch_size = hparams.batch_size + self.title_size = hparams.title_size + self.his_size = hparams.his_size + self.npratio = npratio + + self.word_dict = self.load_dict(hparams.wordDict_file) + self.uid2index = self.load_dict(hparams.userDict_file) + +
[docs] def load_dict(self, file_path): + """Load a pickled file. + + Args: + file_path (str): File path. + + Returns: + object: The unpickled object. + """ + with open(file_path, "rb") as f: + return pickle.load(f)
+ +
[docs] def init_news(self, news_file): + """init news information given news file, such as news_title_index and nid2index. + Args: + news_file: path of news file + """ + + self.nid2index = {} + news_title = [""] + + with tf.io.gfile.GFile(news_file, "r") as rd: + for line in rd: + nid, vert, subvert, title, ab, url, _, _ = line.strip("\n").split( + self.col_spliter + ) + + if nid in self.nid2index: + continue + + self.nid2index[nid] = len(self.nid2index) + 1 + title = word_tokenize(title) + news_title.append(title) + + self.news_title_index = np.zeros( + (len(news_title), self.title_size), dtype="int32" + ) + + for news_index in range(len(news_title)): + title = news_title[news_index] + for word_index in range(min(self.title_size, len(title))): + if title[word_index] in self.word_dict: + self.news_title_index[news_index, word_index] = self.word_dict[ + title[word_index].lower() + ]
+ +
[docs] def init_behaviors(self, behaviors_file): + """init behavior logs given behaviors file. + + Args: + behaviors_file: path of behaviors file + """ + self.histories = [] + self.imprs = [] + self.labels = [] + self.impr_indexes = [] + self.uindexes = [] + + with tf.io.gfile.GFile(behaviors_file, "r") as rd: + impr_index = 0 + for line in rd: + uid, time, history, impr = line.strip("\n").split(self.col_spliter)[-4:] + + history = [self.nid2index[i] for i in history.split()] + history = [0] * (self.his_size - len(history)) + history[ + : self.his_size + ] + + impr_news = [self.nid2index[i.split("-")[0]] for i in impr.split()] + label = [int(i.split("-")[1]) for i in impr.split()] + uindex = self.uid2index[uid] if uid in self.uid2index else 0 + + self.histories.append(history) + self.imprs.append(impr_news) + self.labels.append(label) + self.impr_indexes.append(impr_index) + self.uindexes.append(uindex) + impr_index += 1
+ +
[docs] def parser_one_line(self, line): + """Parse one behavior sample into feature values. + if npratio is larger than 0, return negtive sampled result. + + Args: + line (int): sample index. + + Yields: + list: Parsed results including label, impression id , user id, + candidate_title_index, clicked_title_index. + """ + if self.npratio > 0: + impr_label = self.labels[line] + impr = self.imprs[line] + + poss = [] + negs = [] + + for news, click in zip(impr, impr_label): + if click == 1: + poss.append(news) + else: + negs.append(news) + + for p in poss: + candidate_title_index = [] + impr_index = [] + user_index = [] + label = [1] + [0] * self.npratio + + n = newsample(negs, self.npratio) + candidate_title_index = self.news_title_index[[p] + n] + click_title_index = self.news_title_index[self.histories[line]] + impr_index.append(self.impr_indexes[line]) + user_index.append(self.uindexes[line]) + + yield ( + label, + impr_index, + user_index, + candidate_title_index, + click_title_index, + ) + + else: + impr_label = self.labels[line] + impr = self.imprs[line] + + for news, label in zip(impr, impr_label): + candidate_title_index = [] + impr_index = [] + user_index = [] + label = [label] + + candidate_title_index.append(self.news_title_index[news]) + click_title_index = self.news_title_index[self.histories[line]] + impr_index.append(self.impr_indexes[line]) + user_index.append(self.uindexes[line]) + + yield ( + label, + impr_index, + user_index, + candidate_title_index, + click_title_index, + )
+ +
[docs] def load_data_from_file(self, news_file, behavior_file): + """Read and parse data from news file and behavior file. + + Args: + news_file (str): A file contains several informations of news. + beahaviros_file (str): A file contains information of user impressions. + + Yields: + object: An iterator that yields parsed results, in the format of dict. + """ + + if not hasattr(self, "news_title_index"): + self.init_news(news_file) + + if not hasattr(self, "impr_indexes"): + self.init_behaviors(behavior_file) + + label_list = [] + imp_indexes = [] + user_indexes = [] + candidate_title_indexes = [] + click_title_indexes = [] + cnt = 0 + + indexes = np.arange(len(self.labels)) + + if self.npratio > 0: + np.random.shuffle(indexes) + + for index in indexes: + for ( + label, + imp_index, + user_index, + candidate_title_index, + click_title_index, + ) in self.parser_one_line(index): + candidate_title_indexes.append(candidate_title_index) + click_title_indexes.append(click_title_index) + imp_indexes.append(imp_index) + user_indexes.append(user_index) + label_list.append(label) + + cnt += 1 + if cnt >= self.batch_size: + yield self._convert_data( + label_list, + imp_indexes, + user_indexes, + candidate_title_indexes, + click_title_indexes, + ) + label_list = [] + imp_indexes = [] + user_indexes = [] + candidate_title_indexes = [] + click_title_indexes = [] + cnt = 0 + + if cnt > 0: + yield self._convert_data( + label_list, + imp_indexes, + user_indexes, + candidate_title_indexes, + click_title_indexes, + )
+ + def _convert_data( + self, + label_list, + imp_indexes, + user_indexes, + candidate_title_indexes, + click_title_indexes, + ): + """Convert data into numpy arrays that are good for further model operation. + + Args: + label_list (list): a list of ground-truth labels. + imp_indexes (list): a list of impression indexes. + user_indexes (list): a list of user indexes. + candidate_title_indexes (list): the candidate news titles' words indices. + click_title_indexes (list): words indices for user's clicked news titles. + + Returns: + dict: A dictionary, containing multiple numpy arrays that are convenient for further operation. + """ + + labels = np.asarray(label_list, dtype=np.float32) + imp_indexes = np.asarray(imp_indexes, dtype=np.int32) + user_indexes = np.asarray(user_indexes, dtype=np.int32) + candidate_title_index_batch = np.asarray( + candidate_title_indexes, dtype=np.int64 + ) + click_title_index_batch = np.asarray(click_title_indexes, dtype=np.int64) + return { + "impression_index_batch": imp_indexes, + "user_index_batch": user_indexes, + "clicked_title_batch": click_title_index_batch, + "candidate_title_batch": candidate_title_index_batch, + "labels": labels, + } + +
[docs] def load_user_from_file(self, news_file, behavior_file): + """Read and parse user data from news file and behavior file. + + Args: + news_file (str): A file contains several informations of news. + beahaviros_file (str): A file contains information of user impressions. + + Yields: + object: An iterator that yields parsed user feature, in the format of dict. + """ + + if not hasattr(self, "news_title_index"): + self.init_news(news_file) + + if not hasattr(self, "impr_indexes"): + self.init_behaviors(behavior_file) + + user_indexes = [] + impr_indexes = [] + click_title_indexes = [] + cnt = 0 + + for index in range(len(self.impr_indexes)): + click_title_indexes.append(self.news_title_index[self.histories[index]]) + user_indexes.append(self.uindexes[index]) + impr_indexes.append(self.impr_indexes[index]) + + cnt += 1 + if cnt >= self.batch_size: + yield self._convert_user_data( + user_indexes, + impr_indexes, + click_title_indexes, + ) + user_indexes = [] + impr_indexes = [] + click_title_indexes = [] + cnt = 0 + + if cnt > 0: + yield self._convert_user_data( + user_indexes, + impr_indexes, + click_title_indexes, + )
+ + def _convert_user_data( + self, + user_indexes, + impr_indexes, + click_title_indexes, + ): + """Convert data into numpy arrays that are good for further model operation. + + Args: + user_indexes (list): a list of user indexes. + click_title_indexes (list): words indices for user's clicked news titles. + + Returns: + dict: A dictionary, containing multiple numpy arrays that are convenient for further operation. + """ + + user_indexes = np.asarray(user_indexes, dtype=np.int32) + impr_indexes = np.asarray(impr_indexes, dtype=np.int32) + click_title_index_batch = np.asarray(click_title_indexes, dtype=np.int64) + + return { + "user_index_batch": user_indexes, + "impr_index_batch": impr_indexes, + "clicked_title_batch": click_title_index_batch, + } + +
[docs] def load_news_from_file(self, news_file): + """Read and parse user data from news file. + + Args: + news_file (str): A file contains several informations of news. + + Yields: + object: An iterator that yields parsed news feature, in the format of dict. + """ + if not hasattr(self, "news_title_index"): + self.init_news(news_file) + + news_indexes = [] + candidate_title_indexes = [] + cnt = 0 + + for index in range(len(self.news_title_index)): + news_indexes.append(index) + candidate_title_indexes.append(self.news_title_index[index]) + + cnt += 1 + if cnt >= self.batch_size: + yield self._convert_news_data( + news_indexes, + candidate_title_indexes, + ) + news_indexes = [] + candidate_title_indexes = [] + cnt = 0 + + if cnt > 0: + yield self._convert_news_data( + news_indexes, + candidate_title_indexes, + )
+ + def _convert_news_data( + self, + news_indexes, + candidate_title_indexes, + ): + """Convert data into numpy arrays that are good for further model operation. + + Args: + news_indexes (list): a list of news indexes. + candidate_title_indexes (list): the candidate news titles' words indices. + + Returns: + dict: A dictionary, containing multiple numpy arrays that are convenient for further operation. + """ + + news_indexes_batch = np.asarray(news_indexes, dtype=np.int32) + candidate_title_index_batch = np.asarray( + candidate_title_indexes, dtype=np.int32 + ) + + return { + "news_index_batch": news_indexes_batch, + "candidate_title_batch": candidate_title_index_batch, + } + +
[docs] def load_impression_from_file(self, behaivors_file): + """Read and parse impression data from the behaviors file. + + Args: + behaivors_file (str): A file that contains information about user behaviors. + + Yields: + object: An iterator that yields parsed impression data, in the format of dict. + """ + + if not hasattr(self, "histories"): + self.init_behaviors(behaivors_file) + + indexes = np.arange(len(self.labels)) + + for index in indexes: + impr_label = np.array(self.labels[index], dtype="int32") + impr_news = np.array(self.imprs[index], dtype="int32") + + yield ( + self.impr_indexes[index], + impr_news, + self.uindexes[index], + impr_label, + )
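A minimal usage sketch for MINDIterator. The yaml and data paths below are placeholders for files prepared elsewhere (e.g. with the MIND download utilities), and prepare_hparams comes from recommenders.models.newsrec.newsrec_utils:

from recommenders.models.newsrec.newsrec_utils import prepare_hparams
from recommenders.models.newsrec.io.mind_iterator import MINDIterator

hparams = prepare_hparams(
    "nrms.yaml",                       # placeholder model config
    wordEmb_file="embedding.npy",      # placeholder resource files
    wordDict_file="word_dict.pkl",
    userDict_file="uid2index.pkl",
    batch_size=32,
)
iterator = MINDIterator(hparams, npratio=4)
for batch in iterator.load_data_from_file("train_news.tsv", "train_behaviors.tsv"):
    print(batch["candidate_title_batch"].shape)  # (batch_size, npratio + 1, title_size)
    print(batch["clicked_title_batch"].shape)    # (batch_size, his_size, title_size)
    break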
\ No newline at end of file
diff --git a/_modules/recommenders/models/newsrec/models/base_model.html b/_modules/recommenders/models/newsrec/models/base_model.html new file mode 100644 index 0000000000..bd9e3798d9 --- /dev/null +++ b/_modules/recommenders/models/newsrec/models/base_model.html @@ -0,0 +1,814 @@
+recommenders.models.newsrec.models.base_model — Recommenders documentation
Source code for recommenders.models.newsrec.models.base_model

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import abc
+import time
+import numpy as np
+from tqdm import tqdm
+import tensorflow as tf
+from tensorflow.compat.v1 import keras
+
+from recommenders.models.deeprec.deeprec_utils import cal_metric
+
+tf.compat.v1.disable_eager_execution()
+tf.compat.v1.experimental.output_all_intermediates(True)
+__all__ = ["BaseModel"]
+
+
+
[docs]class BaseModel: + """Basic class of models. + + Attributes: + hparams (HParams): A HParams object, holds the entire set of hyperparameters. + train_iterator (object): An iterator to load the data in training steps. + test_iterator (object): An iterator to load the data in testing steps. + seed (int): Random seed. + """ + + def __init__( + self, + hparams, + iterator_creator, + seed=None, + ): + """Initializing the model. Create common logic needed by all deeprec models, such as the loss function and + the parameter set. + + Args: + hparams (HParams): A HParams object, holds the entire set of hyperparameters. + iterator_creator (object): An iterator to load the data. + seed (int): Random seed. + """ + self.seed = seed + tf.compat.v1.set_random_seed(seed) + np.random.seed(seed) + + self.train_iterator = iterator_creator( + hparams, + hparams.npratio, + col_spliter="\t", + ) + self.test_iterator = iterator_creator( + hparams, + col_spliter="\t", + ) + + self.hparams = hparams + self.support_quick_scoring = hparams.support_quick_scoring + + # set GPU use with on demand growth + gpu_options = tf.compat.v1.GPUOptions(allow_growth=True) + sess = tf.compat.v1.Session( + config=tf.compat.v1.ConfigProto(gpu_options=gpu_options) + ) + + # set this TensorFlow session as the default session for Keras + tf.compat.v1.keras.backend.set_session(sess) + + # IMPORTANT: models have to be loaded AFTER SETTING THE SESSION for keras! + # Otherwise, their weights will be unavailable in the threads after the session has been set + self.model, self.scorer = self._build_graph() + + self.loss = self._get_loss() + self.train_optimizer = self._get_opt() + + self.model.compile(loss=self.loss, optimizer=self.train_optimizer) + + def _init_embedding(self, file_path): + """Load pre-trained embeddings as a constant tensor. + + Args: + file_path (str): the pre-trained glove embeddings file path. + + Returns: + numpy.ndarray: A constant numpy array. + """ + + return np.load(file_path) + + @abc.abstractmethod + def _build_graph(self): + """Subclass will implement this.""" + pass + + @abc.abstractmethod + def _get_input_label_from_iter(self, batch_data): + """Subclass will implement this""" + pass + + def _get_loss(self): + """Make loss function, consisting of data loss and regularization loss. + + Returns: + object: Loss function or loss function name. + """ + if self.hparams.loss == "cross_entropy_loss": + data_loss = "categorical_crossentropy" + elif self.hparams.loss == "log_loss": + data_loss = "binary_crossentropy" + else: + raise ValueError("this loss not defined {0}".format(self.hparams.loss)) + return data_loss + + def _get_opt(self): + """Get the optimizer according to configuration. Usually we will use Adam. + Returns: + object: An optimizer. + """ + lr = self.hparams.learning_rate + optimizer = self.hparams.optimizer + + if optimizer == "adam": + train_opt = keras.optimizers.Adam(lr=lr) + + return train_opt + + def _get_pred(self, logit, task): + """Make final output as prediction score, according to different tasks. + + Args: + logit (object): Base prediction value. + task (str): A task (values: regression/classification) + + Returns: + object: Transformed score + """ + if task == "regression": + pred = tf.identity(logit) + elif task == "classification": + pred = tf.sigmoid(logit) + else: + raise ValueError( + "method must be regression or classification, but now is {0}".format( + task + ) + ) + return pred + 
[docs] def train(self, train_batch_data): + """Go through the optimization step once with one batch of training data. + + Args: + train_batch_data (dict): One batch of training data, as yielded by the train iterator. + + Returns: + float: The training loss on this batch. + """ + train_input, train_label = self._get_input_label_from_iter(train_batch_data) + rslt = self.model.train_on_batch(train_input, train_label) + return rslt
+ +
[docs] def eval(self, eval_batch_data): + """Evaluate the model on one batch of data. + + Args: + eval_batch_data (dict): One batch of evaluation data, as yielded by the test iterator. + + Returns: + tuple: Predicted scores, ground-truth labels and impression indexes for this batch. + """ + eval_input, eval_label = self._get_input_label_from_iter(eval_batch_data) + imp_index = eval_batch_data["impression_index_batch"] + + pred_rslt = self.scorer.predict_on_batch(eval_input) + + return pred_rslt, eval_label, imp_index
+ +
[docs] def fit( + self, + train_news_file, + train_behaviors_file, + valid_news_file, + valid_behaviors_file, + test_news_file=None, + test_behaviors_file=None, + ): + """Fit the model with the training files. Evaluate the model on the validation files per epoch to observe the training status. + If test_news_file is not None, evaluate it too. + + Args: + train_news_file (str): news file for training. + train_behaviors_file (str): behaviors file for training. + valid_news_file (str): news file for validation. + valid_behaviors_file (str): behaviors file for validation. + test_news_file (str): news file for testing, optional. + test_behaviors_file (str): behaviors file for testing, optional. + + Returns: + object: An instance of self. + """ + + for epoch in range(1, self.hparams.epochs + 1): + step = 0 + self.hparams.current_epoch = epoch + epoch_loss = 0 + train_start = time.time() + + tqdm_util = tqdm( + self.train_iterator.load_data_from_file( + train_news_file, train_behaviors_file + ) + ) + + for batch_data_input in tqdm_util: + + step_result = self.train(batch_data_input) + step_data_loss = step_result + + epoch_loss += step_data_loss + step += 1 + if step % self.hparams.show_step == 0: + tqdm_util.set_description( + "step {0:d} , total_loss: {1:.4f}, data_loss: {2:.4f}".format( + step, epoch_loss / step, step_data_loss + ) + ) + + train_end = time.time() + train_time = train_end - train_start + + eval_start = time.time() + + train_info = ",".join( + [ + str(item[0]) + ":" + str(item[1]) + for item in [("logloss loss", epoch_loss / step)] + ] + ) + + eval_res = self.run_eval(valid_news_file, valid_behaviors_file) + eval_info = ", ".join( + [ + str(item[0]) + ":" + str(item[1]) + for item in sorted(eval_res.items(), key=lambda x: x[0]) + ] + ) + if test_news_file is not None: + test_res = self.run_eval(test_news_file, test_behaviors_file) + test_info = ", ".join( + [ + str(item[0]) + ":" + str(item[1]) + for item in sorted(test_res.items(), key=lambda x: x[0]) + ] + ) + eval_end = time.time() + eval_time = eval_end - eval_start + + if test_news_file is not None: + print( + "at epoch {0:d}".format(epoch) + + "\ntrain info: " + + train_info + + "\neval info: " + + eval_info + + "\ntest info: " + + test_info + ) + else: + print( + "at epoch {0:d}".format(epoch) + + "\ntrain info: " + + train_info + + "\neval info: " + + eval_info + ) + print( + "at epoch {0:d} , train time: {1:.1f} eval time: {2:.1f}".format( + epoch, train_time, eval_time + ) + ) + + return self
+ +
[docs] def group_labels(self, labels, preds, group_keys): + """Divide labels and preds into several groups according to the values in group keys. + + Args: + labels (list): ground truth label list. + preds (list): prediction score list. + group_keys (list): group key list. + + Returns: + list, list, list: + - Keys after group. + - Labels after group. + - Preds after group. + + """ + + all_keys = list(set(group_keys)) + all_keys.sort() + group_labels = {k: [] for k in all_keys} + group_preds = {k: [] for k in all_keys} + + for label, p, k in zip(labels, preds, group_keys): + group_labels[k].append(label) + group_preds[k].append(p) + + all_labels = [] + all_preds = [] + for k in all_keys: + all_labels.append(group_labels[k]) + all_preds.append(group_preds[k]) + + return all_keys, all_labels, all_preds
+ +
[docs] def run_eval(self, news_filename, behaviors_file): + """Evaluate the given files and return evaluation metrics. + + Args: + news_filename (str): news file to be evaluated. + behaviors_file (str): behaviors file to be evaluated. + + Returns: + dict: A dictionary that contains evaluation metrics. + """ + + if self.support_quick_scoring: + _, group_labels, group_preds = self.run_fast_eval( + news_filename, behaviors_file + ) + else: + _, group_labels, group_preds = self.run_slow_eval( + news_filename, behaviors_file + ) + res = cal_metric(group_labels, group_preds, self.hparams.metrics) + return res
+ + def user(self, batch_user_input): + user_input = self._get_user_feature_from_iter(batch_user_input) + user_vec = self.userencoder.predict_on_batch(user_input) + user_index = batch_user_input["impr_index_batch"] + + return user_index, user_vec + + def news(self, batch_news_input): + news_input = self._get_news_feature_from_iter(batch_news_input) + news_vec = self.newsencoder.predict_on_batch(news_input) + news_index = batch_news_input["news_index_batch"] + + return news_index, news_vec + + def run_user(self, news_filename, behaviors_file): + if not hasattr(self, "userencoder"): + raise ValueError("model must have attribute userencoder") + + user_indexes = [] + user_vecs = [] + for batch_data_input in tqdm( + self.test_iterator.load_user_from_file(news_filename, behaviors_file) + ): + user_index, user_vec = self.user(batch_data_input) + user_indexes.extend(np.reshape(user_index, -1)) + user_vecs.extend(user_vec) + + return dict(zip(user_indexes, user_vecs)) + + def run_news(self, news_filename): + if not hasattr(self, "newsencoder"): + raise ValueError("model must have attribute newsencoder") + + news_indexes = [] + news_vecs = [] + for batch_data_input in tqdm( + self.test_iterator.load_news_from_file(news_filename) + ): + news_index, news_vec = self.news(batch_data_input) + news_indexes.extend(np.reshape(news_index, -1)) + news_vecs.extend(news_vec) + + return dict(zip(news_indexes, news_vecs)) + + def run_slow_eval(self, news_filename, behaviors_file): + preds = [] + labels = [] + imp_indexes = [] + + for batch_data_input in tqdm( + self.test_iterator.load_data_from_file(news_filename, behaviors_file) + ): + step_pred, step_labels, step_imp_index = self.eval(batch_data_input) + preds.extend(np.reshape(step_pred, -1)) + labels.extend(np.reshape(step_labels, -1)) + imp_indexes.extend(np.reshape(step_imp_index, -1)) + + group_impr_indexes, group_labels, group_preds = self.group_labels( + labels, preds, imp_indexes + ) + return group_impr_indexes, group_labels, group_preds + + def run_fast_eval(self, news_filename, behaviors_file): + news_vecs = self.run_news(news_filename) + user_vecs = self.run_user(news_filename, behaviors_file) + + self.news_vecs = news_vecs + self.user_vecs = user_vecs + + group_impr_indexes = [] + group_labels = [] + group_preds = [] + + for ( + impr_index, + news_index, + user_index, + label, + ) in tqdm(self.test_iterator.load_impression_from_file(behaviors_file)): + pred = np.dot( + np.stack([news_vecs[i] for i in news_index], axis=0), + user_vecs[impr_index], + ) + group_impr_indexes.append(impr_index) + group_labels.append(label) + group_preds.append(pred) + + return group_impr_indexes, group_labels, group_preds
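run_fast_eval works because the model factorizes into a news encoder and a user encoder: every news and user vector is computed once, and each impression is then scored with a plain dot product. A toy numpy illustration of that scoring step (ids and values are made up):

import numpy as np

news_vecs = {1: np.array([0.1, 0.9]), 2: np.array([0.8, 0.2]), 3: np.array([0.5, 0.5])}
user_vecs = {0: np.array([1.0, 0.0])}

impr_index, news_index = 0, [2, 1, 3]
pred = np.dot(np.stack([news_vecs[i] for i in news_index], axis=0), user_vecs[impr_index])
print(pred)  # [0.8 0.1 0.5] -> candidate 2 ranks first for this user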
\ No newline at end of file
diff --git a/_modules/recommenders/models/newsrec/models/layers.html b/_modules/recommenders/models/newsrec/models/layers.html new file mode 100644 index 0000000000..23a4ce54d9 --- /dev/null +++ b/_modules/recommenders/models/newsrec/models/layers.html @@ -0,0 +1,748 @@
+recommenders.models.newsrec.models.layers — Recommenders documentation
Source code for recommenders.models.newsrec.models.layers

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import tensorflow.compat.v1.keras as keras
+from tensorflow.compat.v1.linalg import einsum
+from tensorflow.compat.v1.keras import layers
+from tensorflow.compat.v1.keras import backend as K
+
+
+
[docs]class AttLayer2(layers.Layer): + """Soft alignment attention implementation. + + Attributes: + dim (int): attention hidden dim. + """ + + def __init__(self, dim=200, seed=0, **kwargs): + """Initialization steps for AttLayer2. + + Args: + dim (int): attention hidden dim. + seed (int): random seed. + """ + + self.dim = dim + self.seed = seed + super(AttLayer2, self).__init__(**kwargs) + 
[docs] def build(self, input_shape): + """Initialization for variables in AttLayer2. + There are three variables in AttLayer2, i.e. W, b and q. + + Args: + input_shape (object): shape of input tensor. + """ + + assert len(input_shape) == 3 + dim = self.dim + self.W = self.add_weight( + name="W", + shape=(int(input_shape[-1]), dim), + initializer=keras.initializers.glorot_uniform(seed=self.seed), + trainable=True, + ) + self.b = self.add_weight( + name="b", + shape=(dim,), + initializer=keras.initializers.Zeros(), + trainable=True, + ) + self.q = self.add_weight( + name="q", + shape=(dim, 1), + initializer=keras.initializers.glorot_uniform(seed=self.seed), + trainable=True, + ) + super(AttLayer2, self).build(input_shape)  # be sure you call this somewhere!
+ +
[docs] def call(self, inputs, mask=None, **kwargs): + """Core implementation of soft attention. + + Args: + inputs (object): input tensor. + + Returns: + object: weighted sum of input tensors. + """ + + attention = K.tanh(K.dot(inputs, self.W) + self.b) + attention = K.dot(attention, self.q) + + attention = K.squeeze(attention, axis=2) + + if mask is None: + attention = K.exp(attention) + else: + attention = K.exp(attention) * K.cast(mask, dtype="float32") + + attention_weight = attention / ( + K.sum(attention, axis=-1, keepdims=True) + K.epsilon() + ) + + attention_weight = K.expand_dims(attention_weight) + weighted_input = inputs * attention_weight + return K.sum(weighted_input, axis=1)
+ +
[docs] def compute_mask(self, input, input_mask=None): + """Compute output mask value. + + Args: + input (object): input tensor. + input_mask: input mask. + + Returns: + object: output mask. + """ + return None
+ +
[docs] def compute_output_shape(self, input_shape): + """Compute shape of output tensor. + + Args: + input_shape (tuple): shape of input tensor. + + Returns: + tuple: shape of output tensor. + """ + return input_shape[0], input_shape[-1]
+ + +
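In plain numpy, AttLayer2's call reduces to learned additive attention, a = softmax(tanh(XW + b) q), followed by a weighted sum over the time axis. A small sketch with made-up shapes (mask and epsilon handling omitted):

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(2, 4, 8))                   # (batch, time, features)
W = rng.normal(size=(8, 200))
b = np.zeros(200)
q = rng.normal(size=(200, 1))

scores = (np.tanh(X @ W + b) @ q).squeeze(-1)    # (batch, time)
weights = np.exp(scores)
weights = weights / weights.sum(axis=-1, keepdims=True)
pooled = (X * weights[..., None]).sum(axis=1)    # (batch, features)
print(pooled.shape)                              # (2, 8)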
[docs]class SelfAttention(layers.Layer): + """Multi-head self attention implementation. + + Args: + multiheads (int): The number of heads. + head_dim (object): Dimension of each head. + mask_right (boolean): whether to mask right words. + + Returns: + object: Weighted sum after attention. + """ + + def __init__(self, multiheads, head_dim, seed=0, mask_right=False, **kwargs): + """Initialization steps for SelfAttention. + + Args: + multiheads (int): The number of heads. + head_dim (object): Dimension of each head. + seed (int): random seed. + mask_right (boolean): Whether to mask right words. + """ + + self.multiheads = multiheads + self.head_dim = head_dim + self.output_dim = multiheads * head_dim + self.mask_right = mask_right + self.seed = seed + super(SelfAttention, self).__init__(**kwargs) + 
[docs] def compute_output_shape(self, input_shape): + """Compute shape of output tensor. + + Returns: + tuple: output shape tuple. + """ + + return (input_shape[0][0], input_shape[0][1], self.output_dim)
+ +
[docs] def build(self, input_shape): + """Initialization for variables in SelfAttention. + There are three variables in SelfAttention, i.e. WQ, WK and WV. + WQ is used for linear transformation of query. + WK is used for linear transformation of key. + WV is used for linear transformation of value. + + Args: + input_shape (object): shape of input tensor. + """ + + self.WQ = self.add_weight( + name="WQ", + shape=(int(input_shape[0][-1]), self.output_dim), + initializer=keras.initializers.glorot_uniform(seed=self.seed), + trainable=True, + ) + self.WK = self.add_weight( + name="WK", + shape=(int(input_shape[1][-1]), self.output_dim), + initializer=keras.initializers.glorot_uniform(seed=self.seed), + trainable=True, + ) + self.WV = self.add_weight( + name="WV", + shape=(int(input_shape[2][-1]), self.output_dim), + initializer=keras.initializers.glorot_uniform(seed=self.seed), + trainable=True, + ) + super(SelfAttention, self).build(input_shape)
+ +
[docs] def Mask(self, inputs, seq_len, mode="add"): + """Mask operation used in multi-head self attention. + + Args: + inputs (object): input tensor to be masked. + seq_len (object): sequence length of inputs. + mode (str): mode of mask, either "add" or "mul". + + Returns: + object: tensors after masking. + """ + + if seq_len is None: + return inputs + else: + mask = K.one_hot(indices=seq_len[:, 0], num_classes=K.shape(inputs)[1]) + mask = 1 - K.cumsum(mask, axis=1) + + for _ in range(len(inputs.shape) - 2): + mask = K.expand_dims(mask, 2) + + if mode == "mul": + return inputs * mask + elif mode == "add": + return inputs - (1 - mask) * 1e12
+ +
[docs] def call(self, QKVs): + """Core logic of multi-head self attention. + + Args: + QKVs (list): inputs of multi-head self attention, i.e. query, key and value. + + Returns: + object: output tensors. + """ + if len(QKVs) == 3: + Q_seq, K_seq, V_seq = QKVs + Q_len, V_len = None, None + elif len(QKVs) == 5: + Q_seq, K_seq, V_seq, Q_len, V_len = QKVs + Q_seq = K.dot(Q_seq, self.WQ) + Q_seq = K.reshape( + Q_seq, shape=(-1, K.shape(Q_seq)[1], self.multiheads, self.head_dim) + ) + Q_seq = K.permute_dimensions(Q_seq, pattern=(0, 2, 1, 3)) + + K_seq = K.dot(K_seq, self.WK) + K_seq = K.reshape( + K_seq, shape=(-1, K.shape(K_seq)[1], self.multiheads, self.head_dim) + ) + K_seq = K.permute_dimensions(K_seq, pattern=(0, 2, 1, 3)) + + V_seq = K.dot(V_seq, self.WV) + V_seq = K.reshape( + V_seq, shape=(-1, K.shape(V_seq)[1], self.multiheads, self.head_dim) + ) + V_seq = K.permute_dimensions(V_seq, pattern=(0, 2, 1, 3)) + + A = einsum("abij, abkj -> abik", Q_seq, K_seq) / K.sqrt( + K.cast(self.head_dim, dtype="float32") + ) + A = K.permute_dimensions( + A, pattern=(0, 3, 2, 1) + )  # A.shape=[batch_size,K_sequence_length,Q_sequence_length,self.multiheads] + + A = self.Mask(A, V_len, "add") + A = K.permute_dimensions(A, pattern=(0, 3, 2, 1)) + + if self.mask_right: + ones = K.ones_like(A[:1, :1]) + lower_triangular = K.tf.matrix_band_part(ones, num_lower=-1, num_upper=0) + mask = (ones - lower_triangular) * 1e12 + A = A - mask + A = K.softmax(A) + + O_seq = einsum("abij, abjk -> abik", A, V_seq) + O_seq = K.permute_dimensions(O_seq, pattern=(0, 2, 1, 3)) + + O_seq = K.reshape(O_seq, shape=(-1, K.shape(O_seq)[1], self.output_dim)) + O_seq = self.Mask(O_seq, Q_len, "mul") + return O_seq
+ +
[docs] def get_config(self): + """Add multiheads, head_dim and mask_right into the layer config. + + Returns: + dict: config of SelfAttention layer. + """ + config = super(SelfAttention, self).get_config() + config.update( + { + "multiheads": self.multiheads, + "head_dim": self.head_dim, + "mask_right": self.mask_right, + } + ) + return config
+ + +
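The two einsum calls in SelfAttention.call implement per-head scaled dot-product attention: A = softmax(QK^T / sqrt(head_dim)) applied to V. A toy numpy sketch with the same einsum subscripts (masking omitted):

import numpy as np

B, h, L, d = 1, 2, 4, 3                                   # batch, heads, seq length, head dim
rng = np.random.default_rng(1)
Q = rng.normal(size=(B, h, L, d))
K_ = rng.normal(size=(B, h, L, d))
V = rng.normal(size=(B, h, L, d))

A = np.einsum("abij,abkj->abik", Q, K_) / np.sqrt(d)      # (B, h, L, L) attention logits
A = np.exp(A) / np.exp(A).sum(axis=-1, keepdims=True)     # softmax over keys
O = np.einsum("abij,abjk->abik", A, V)                    # (B, h, L, d) weighted values
print(O.shape)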
[docs]def PersonalizedAttentivePooling(dim1, dim2, dim3, seed=0): + """Soft alignment attention implementation. + + Args: + dim1 (int): first dimension of the value shape. + dim2 (int): second dimension of the value shape. + dim3 (int): dimension of the query. + seed (int): random seed. + + Returns: + object: weighted summary of the input values. + """ + vecs_input = keras.Input(shape=(dim1, dim2), dtype="float32") + query_input = keras.Input(shape=(dim3,), dtype="float32") + + user_vecs = layers.Dropout(0.2)(vecs_input) + user_att = layers.Dense( + dim3, + activation="tanh", + kernel_initializer=keras.initializers.glorot_uniform(seed=seed), + bias_initializer=keras.initializers.Zeros(), + )(user_vecs) + user_att2 = layers.Dot(axes=-1)([query_input, user_att]) + user_att2 = layers.Activation("softmax")(user_att2) + user_vec = layers.Dot((1, 1))([user_vecs, user_att2]) + + model = keras.Model([vecs_input, query_input], user_vec) + return model
+ + +
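A quick shape check for PersonalizedAttentivePooling, assuming an environment where these Keras layers build; the dimensions are arbitrary. It pools dim1 value vectors of size dim2 into a single dim2-sized vector, weighted by a dim3-dimensional query:

import numpy as np

pooling = PersonalizedAttentivePooling(dim1=50, dim2=400, dim3=200, seed=42)
vecs = np.zeros((8, 50, 400), dtype="float32")   # e.g. 50 clicked-news vectors per user
query = np.zeros((8, 200), dtype="float32")      # per-user preference query
pooled = pooling.predict([vecs, query])
print(pooled.shape)                              # (8, 400): one summary vector per user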
[docs]class ComputeMasking(layers.Layer): + """Compute whether the inputs contain zero values. + + Returns: + bool tensor: True for values not equal to zero. + """ + + def __init__(self, **kwargs): + super(ComputeMasking, self).__init__(**kwargs) + 
[docs] def call(self, inputs, **kwargs): + """Call method for ComputeMasking. + + Args: + inputs (object): input tensor. + + Returns: + bool tensor: True for values not equal to zero. + """ + mask = K.not_equal(inputs, 0) + return K.cast(mask, K.floatx())
+ +
[docs] def compute_output_shape(self, input_shape): + return input_shape
+ + +
[docs]class OverwriteMasking(layers.Layer): + """Set values at specific positions to zero. + + Args: + inputs (list): value tensor and mask tensor. + + Returns: + object: tensor after setting values to zero. + """ + + def __init__(self, **kwargs): + super(OverwriteMasking, self).__init__(**kwargs) + +
[docs] def build(self, input_shape): + super(OverwriteMasking, self).build(input_shape)
+ +
[docs] def call(self, inputs, **kwargs): + """Call method for OverwriteMasking. + + Args: + inputs (list): value tensor and mask tensor. + + Returns: + object: tensor after setting values to zero. + """ + return inputs[0] * K.expand_dims(inputs[1])
+ +
[docs] def compute_output_shape(self, input_shape): + return input_shape[0]
+
\ No newline at end of file
diff --git a/_modules/recommenders/models/newsrec/models/lstur.html b/_modules/recommenders/models/newsrec/models/lstur.html new file mode 100644 index 0000000000..16749bda1c --- /dev/null +++ b/_modules/recommenders/models/newsrec/models/lstur.html @@ -0,0 +1,601 @@
+recommenders.models.newsrec.models.lstur — Recommenders documentation
Source code for recommenders.models.newsrec.models.lstur

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import tensorflow.compat.v1.keras as keras
+from tensorflow.compat.v1.keras import layers
+
+
+from recommenders.models.newsrec.models.base_model import BaseModel
+from recommenders.models.newsrec.models.layers import (
+    AttLayer2,
+    ComputeMasking,
+    OverwriteMasking,
+)
+
+__all__ = ["LSTURModel"]
+
+
+
[docs]class LSTURModel(BaseModel): + """LSTUR model (Neural News Recommendation with Long- and Short-term User Representations) + + Mingxiao An, Fangzhao Wu, Chuhan Wu, Kun Zhang, Zheng Liu and Xing Xie: + Neural News Recommendation with Long- and Short-term User Representations, ACL 2019 + + Attributes: + word2vec_embedding (numpy.ndarray): Pretrained word embedding matrix. + hparam (object): Global hyper-parameters. + """ + + def __init__(self, hparams, iterator_creator, seed=None): + """Initialization steps for LSTUR. + Compared with the BaseModel, LSTUR needs word embeddings. + After creating the word embedding matrix, BaseModel's __init__ method will be called. + + Args: + hparams (object): Global hyper-parameters. Some key settings such as type and gru_unit are there. + iterator_creator (object): LSTUR data loader class for train, test and validation data. + seed (int): Random seed. + """ + + self.word2vec_embedding = self._init_embedding(hparams.wordEmb_file) + self.hparam = hparams + + super().__init__(hparams, iterator_creator, seed=seed) + + def _get_input_label_from_iter(self, batch_data): + input_feat = [ + batch_data["user_index_batch"], + batch_data["clicked_title_batch"], + batch_data["candidate_title_batch"], + ] + input_label = batch_data["labels"] + return input_feat, input_label + + def _get_user_feature_from_iter(self, batch_data): + return [batch_data["clicked_title_batch"], batch_data["user_index_batch"]] + + def _get_news_feature_from_iter(self, batch_data): + return batch_data["candidate_title_batch"] + + def _build_graph(self): + """Build LSTUR model and scorer. + + Returns: + object: a model used to train. + object: a model used for evaluation and inference. + """ + + model, scorer = self._build_lstur() + return model, scorer + + def _build_userencoder(self, titleencoder, type="ini"): + """The main function to create user encoder of LSTUR. + + Args: + titleencoder (object): the news encoder of LSTUR. + type (str): "ini" uses the long-term user embedding as the initial GRU state; "con" concatenates it with the short-term GRU output. + + Return: + object: the user encoder of LSTUR. 
+ """ + hparams = self.hparams + his_input_title = keras.Input( + shape=(hparams.his_size, hparams.title_size), dtype="int32" + ) + user_indexes = keras.Input(shape=(1,), dtype="int32") + + user_embedding_layer = layers.Embedding( + len(self.train_iterator.uid2index), + hparams.gru_unit, + trainable=True, + embeddings_initializer="zeros", + ) + + long_u_emb = layers.Reshape((hparams.gru_unit,))( + user_embedding_layer(user_indexes) + ) + click_title_presents = layers.TimeDistributed(titleencoder)(his_input_title) + + if type == "ini": + user_present = layers.GRU( + hparams.gru_unit, + kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed), + recurrent_initializer=keras.initializers.glorot_uniform(seed=self.seed), + bias_initializer=keras.initializers.Zeros(), + )( + layers.Masking(mask_value=0.0)(click_title_presents), + initial_state=[long_u_emb], + ) + elif type == "con": + short_uemb = layers.GRU( + hparams.gru_unit, + kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed), + recurrent_initializer=keras.initializers.glorot_uniform(seed=self.seed), + bias_initializer=keras.initializers.Zeros(), + )(layers.Masking(mask_value=0.0)(click_title_presents)) + + user_present = layers.Concatenate()([short_uemb, long_u_emb]) + user_present = layers.Dense( + hparams.gru_unit, + bias_initializer=keras.initializers.Zeros(), + kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed), + )(user_present) + + model = keras.Model( + [his_input_title, user_indexes], user_present, name="user_encoder" + ) + return model + + def _build_newsencoder(self, embedding_layer): + """The main function to create news encoder of LSTUR. + + Args: + embedding_layer (object): a word embedding layer. + + Return: + object: the news encoder of LSTUR. + """ + hparams = self.hparams + sequences_input_title = keras.Input(shape=(hparams.title_size,), dtype="int32") + embedded_sequences_title = embedding_layer(sequences_input_title) + + y = layers.Dropout(hparams.dropout)(embedded_sequences_title) + y = layers.Conv1D( + hparams.filter_num, + hparams.window_size, + activation=hparams.cnn_activation, + padding="same", + bias_initializer=keras.initializers.Zeros(), + kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed), + )(y) + print(y) + y = layers.Dropout(hparams.dropout)(y) + y = layers.Masking()( + OverwriteMasking()([y, ComputeMasking()(sequences_input_title)]) + ) + pred_title = AttLayer2(hparams.attention_hidden_dim, seed=self.seed)(y) + print(pred_title) + model = keras.Model(sequences_input_title, pred_title, name="news_encoder") + return model + + def _build_lstur(self): + """The main function to create LSTUR's logic. The core of LSTUR + is a user encoder and a news encoder. + + Returns: + object: a model used to train. + object: a model used to evaluate and inference. 
+ """ + hparams = self.hparams + + his_input_title = keras.Input( + shape=(hparams.his_size, hparams.title_size), dtype="int32" + ) + pred_input_title = keras.Input( + shape=(hparams.npratio + 1, hparams.title_size), dtype="int32" + ) + pred_input_title_one = keras.Input( + shape=( + 1, + hparams.title_size, + ), + dtype="int32", + ) + pred_title_reshape = layers.Reshape((hparams.title_size,))(pred_input_title_one) + user_indexes = keras.Input(shape=(1,), dtype="int32") + + embedding_layer = layers.Embedding( + self.word2vec_embedding.shape[0], + hparams.word_emb_dim, + weights=[self.word2vec_embedding], + trainable=True, + ) + + titleencoder = self._build_newsencoder(embedding_layer) + self.userencoder = self._build_userencoder(titleencoder, type=hparams.type) + self.newsencoder = titleencoder + + user_present = self.userencoder([his_input_title, user_indexes]) + news_present = layers.TimeDistributed(self.newsencoder)(pred_input_title) + news_present_one = self.newsencoder(pred_title_reshape) + + preds = layers.Dot(axes=-1)([news_present, user_present]) + preds = layers.Activation(activation="softmax")(preds) + + pred_one = layers.Dot(axes=-1)([news_present_one, user_present]) + pred_one = layers.Activation(activation="sigmoid")(pred_one) + + model = keras.Model([user_indexes, his_input_title, pred_input_title], preds) + scorer = keras.Model( + [user_indexes, his_input_title, pred_input_title_one], pred_one + ) + + return model, scorer
+
+ +
+ + + + + + +
+ +
+
+
+ +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/_modules/recommenders/models/newsrec/models/naml.html b/_modules/recommenders/models/newsrec/models/naml.html new file mode 100644 index 0000000000..98f57dff64 --- /dev/null +++ b/_modules/recommenders/models/newsrec/models/naml.html @@ -0,0 +1,785 @@ + + + + + + + + + + + recommenders.models.newsrec.models.naml — Recommenders documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ + + +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

+ +
+
+ +
+
+
+ + + + +
+ +

Source code for recommenders.models.newsrec.models.naml

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import numpy as np
+import tensorflow.keras as keras
+from tensorflow.keras import layers
+
+
+from recommenders.models.newsrec.models.base_model import BaseModel
+from recommenders.models.newsrec.models.layers import AttLayer2
+
+__all__ = ["NAMLModel"]
+
+
+
[docs]class NAMLModel(BaseModel): +    """NAML model (Neural News Recommendation with Attentive Multi-View Learning) + +    Chuhan Wu, Fangzhao Wu, Mingxiao An, Jianqiang Huang, Yongfeng Huang and Xing Xie, +    Neural News Recommendation with Attentive Multi-View Learning, IJCAI 2019 + +    Attributes: +        word2vec_embedding (numpy.ndarray): Pretrained word embedding matrix. +        hparam (object): Global hyper-parameters. +    """ + +    def __init__(self, hparams, iterator_creator, seed=None): +        """Initialization steps for NAML. +        Compared with BaseModel, NAML needs a word embedding matrix. +        After creating the word embedding matrix, BaseModel's __init__ method is called. + +        Args: +            hparams (object): Global hyper-parameters. Some key settings, such as filter_num, are there. +            iterator_creator (object): NAML data loader class for train, test and validation data. +            seed (int): Random seed. +        """ + +        self.word2vec_embedding = self._init_embedding(hparams.wordEmb_file) +        self.hparam = hparams + +        super().__init__(hparams, iterator_creator, seed=seed) + +    def _get_input_label_from_iter(self, batch_data): +        input_feat = [ +            batch_data["clicked_title_batch"], +            batch_data["clicked_ab_batch"], +            batch_data["clicked_vert_batch"], +            batch_data["clicked_subvert_batch"], +            batch_data["candidate_title_batch"], +            batch_data["candidate_ab_batch"], +            batch_data["candidate_vert_batch"], +            batch_data["candidate_subvert_batch"], +        ] +        input_label = batch_data["labels"] +        return input_feat, input_label + +    def _get_user_feature_from_iter(self, batch_data): +        """get input of user encoder +        Args: +            batch_data: input batch data from user iterator + +        Returns: +            numpy.ndarray: input user feature (clicked title batch) +        """ +        input_feature = [ +            batch_data["clicked_title_batch"], +            batch_data["clicked_ab_batch"], +            batch_data["clicked_vert_batch"], +            batch_data["clicked_subvert_batch"], +        ] +        input_feature = np.concatenate(input_feature, axis=-1) +        return input_feature + +    def _get_news_feature_from_iter(self, batch_data): +        """get input of news encoder +        Args: +            batch_data: input batch data from news iterator + +        Returns: +            numpy.ndarray: input news feature (candidate title batch) +        """ +        input_feature = [ +            batch_data["candidate_title_batch"], +            batch_data["candidate_ab_batch"], +            batch_data["candidate_vert_batch"], +            batch_data["candidate_subvert_batch"], +        ] +        input_feature = np.concatenate(input_feature, axis=-1) +        return input_feature + +    def _build_graph(self): +        """Build NAML model and scorer. + +        Returns: +            object: a model used to train. +            object: a model used to evaluate and run inference. +        """ + +        model, scorer = self._build_naml() +        return model, scorer + +    def _build_userencoder(self, newsencoder): +        """The main function to create user encoder of NAML. + +        Args: +            newsencoder (object): the news encoder of NAML. + +        Return: +            object: the user encoder of NAML. +        """ +        hparams = self.hparams +        his_input_title_body_verts = keras.Input( +            shape=(hparams.his_size, hparams.title_size + hparams.body_size + 2), +            dtype="int32", +        ) + +        click_news_presents = layers.TimeDistributed(newsencoder)( +            his_input_title_body_verts +        ) +        user_present = AttLayer2(hparams.attention_hidden_dim, seed=self.seed)( +            click_news_presents +        ) + +        model = keras.Model( +            his_input_title_body_verts, user_present, name="user_encoder" +        ) +        return model + +    def _build_newsencoder(self, embedding_layer): +        """The main function to create news encoder of NAML.
+        The news encoder is composed of a title encoder, a body encoder, a vert encoder and a subvert encoder. + +        Args: +            embedding_layer (object): a word embedding layer. + +        Return: +            object: the news encoder of NAML. +        """ +        hparams = self.hparams +        input_title_body_verts = keras.Input( +            shape=(hparams.title_size + hparams.body_size + 2,), dtype="int32" +        ) + +        sequences_input_title = layers.Lambda(lambda x: x[:, : hparams.title_size])( +            input_title_body_verts +        ) +        sequences_input_body = layers.Lambda( +            lambda x: x[:, hparams.title_size : hparams.title_size + hparams.body_size] +        )(input_title_body_verts) +        input_vert = layers.Lambda( +            lambda x: x[ +                :, +                hparams.title_size +                + hparams.body_size : hparams.title_size +                + hparams.body_size +                + 1, +            ] +        )(input_title_body_verts) +        input_subvert = layers.Lambda( +            lambda x: x[:, hparams.title_size + hparams.body_size + 1 :] +        )(input_title_body_verts) + +        title_repr = self._build_titleencoder(embedding_layer)(sequences_input_title) +        body_repr = self._build_bodyencoder(embedding_layer)(sequences_input_body) +        vert_repr = self._build_vertencoder()(input_vert) +        subvert_repr = self._build_subvertencoder()(input_subvert) + +        concate_repr = layers.Concatenate(axis=-2)( +            [title_repr, body_repr, vert_repr, subvert_repr] +        ) +        news_repr = AttLayer2(hparams.attention_hidden_dim, seed=self.seed)( +            concate_repr +        ) + +        model = keras.Model(input_title_body_verts, news_repr, name="news_encoder") +        return model + +    def _build_titleencoder(self, embedding_layer): +        """build title encoder of NAML news encoder. + +        Args: +            embedding_layer (object): a word embedding layer. + +        Return: +            object: the title encoder of NAML. +        """ +        hparams = self.hparams +        sequences_input_title = keras.Input(shape=(hparams.title_size,), dtype="int32") +        embedded_sequences_title = embedding_layer(sequences_input_title) + +        y = layers.Dropout(hparams.dropout)(embedded_sequences_title) +        y = layers.Conv1D( +            hparams.filter_num, +            hparams.window_size, +            activation=hparams.cnn_activation, +            padding="same", +            bias_initializer=keras.initializers.Zeros(), +            kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed), +        )(y) +        y = layers.Dropout(hparams.dropout)(y) +        pred_title = AttLayer2(hparams.attention_hidden_dim, seed=self.seed)(y) +        pred_title = layers.Reshape((1, hparams.filter_num))(pred_title) + +        model = keras.Model(sequences_input_title, pred_title, name="title_encoder") +        return model + +    def _build_bodyencoder(self, embedding_layer): +        """build body encoder of NAML news encoder. + +        Args: +            embedding_layer (object): a word embedding layer. + +        Return: +            object: the body encoder of NAML. +        """ +        hparams = self.hparams +        sequences_input_body = keras.Input(shape=(hparams.body_size,), dtype="int32") +        embedded_sequences_body = embedding_layer(sequences_input_body) + +        y = layers.Dropout(hparams.dropout)(embedded_sequences_body) +        y = layers.Conv1D( +            hparams.filter_num, +            hparams.window_size, +            activation=hparams.cnn_activation, +            padding="same", +            bias_initializer=keras.initializers.Zeros(), +            kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed), +        )(y) +        y = layers.Dropout(hparams.dropout)(y) +        pred_body = AttLayer2(hparams.attention_hidden_dim, seed=self.seed)(y) +        pred_body = layers.Reshape((1, hparams.filter_num))(pred_body) + +        model = keras.Model(sequences_input_body, pred_body, name="body_encoder") +        return model + +    def _build_vertencoder(self): +        """build vert encoder of NAML news encoder. + +        Return: +            object: the vert encoder of NAML.
+ """ + hparams = self.hparams + input_vert = keras.Input(shape=(1,), dtype="int32") + + vert_embedding = layers.Embedding( + hparams.vert_num, hparams.vert_emb_dim, trainable=True + ) + + vert_emb = vert_embedding(input_vert) + pred_vert = layers.Dense( + hparams.filter_num, + activation=hparams.dense_activation, + bias_initializer=keras.initializers.Zeros(), + kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed), + )(vert_emb) + pred_vert = layers.Reshape((1, hparams.filter_num))(pred_vert) + + model = keras.Model(input_vert, pred_vert, name="vert_encoder") + return model + + def _build_subvertencoder(self): + """build subvert encoder of NAML news encoder. + + Return: + object: the subvert encoder of NAML. + """ + hparams = self.hparams + input_subvert = keras.Input(shape=(1,), dtype="int32") + + subvert_embedding = layers.Embedding( + hparams.subvert_num, hparams.subvert_emb_dim, trainable=True + ) + + subvert_emb = subvert_embedding(input_subvert) + pred_subvert = layers.Dense( + hparams.filter_num, + activation=hparams.dense_activation, + bias_initializer=keras.initializers.Zeros(), + kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed), + )(subvert_emb) + pred_subvert = layers.Reshape((1, hparams.filter_num))(pred_subvert) + + model = keras.Model(input_subvert, pred_subvert, name="subvert_encoder") + return model + + def _build_naml(self): + """The main function to create NAML's logic. The core of NAML + is a user encoder and a news encoder. + + Returns: + object: a model used to train. + object: a model used to evaluate and predict. + """ + hparams = self.hparams + + his_input_title = keras.Input( + shape=(hparams.his_size, hparams.title_size), dtype="int32" + ) + his_input_body = keras.Input( + shape=(hparams.his_size, hparams.body_size), dtype="int32" + ) + his_input_vert = keras.Input(shape=(hparams.his_size, 1), dtype="int32") + his_input_subvert = keras.Input(shape=(hparams.his_size, 1), dtype="int32") + + pred_input_title = keras.Input( + shape=(hparams.npratio + 1, hparams.title_size), dtype="int32" + ) + pred_input_body = keras.Input( + shape=(hparams.npratio + 1, hparams.body_size), dtype="int32" + ) + pred_input_vert = keras.Input(shape=(hparams.npratio + 1, 1), dtype="int32") + pred_input_subvert = keras.Input(shape=(hparams.npratio + 1, 1), dtype="int32") + + pred_input_title_one = keras.Input( + shape=( + 1, + hparams.title_size, + ), + dtype="int32", + ) + pred_input_body_one = keras.Input( + shape=( + 1, + hparams.body_size, + ), + dtype="int32", + ) + pred_input_vert_one = keras.Input(shape=(1, 1), dtype="int32") + pred_input_subvert_one = keras.Input(shape=(1, 1), dtype="int32") + + his_title_body_verts = layers.Concatenate(axis=-1)( + [his_input_title, his_input_body, his_input_vert, his_input_subvert] + ) + + pred_title_body_verts = layers.Concatenate(axis=-1)( + [pred_input_title, pred_input_body, pred_input_vert, pred_input_subvert] + ) + + pred_title_body_verts_one = layers.Concatenate(axis=-1)( + [ + pred_input_title_one, + pred_input_body_one, + pred_input_vert_one, + pred_input_subvert_one, + ] + ) + pred_title_body_verts_one = layers.Reshape((-1,))(pred_title_body_verts_one) + + embedding_layer = layers.Embedding( + self.word2vec_embedding.shape[0], + hparams.word_emb_dim, + weights=[self.word2vec_embedding], + trainable=True, + ) + + self.newsencoder = self._build_newsencoder(embedding_layer) + self.userencoder = self._build_userencoder(self.newsencoder) + + user_present = self.userencoder(his_title_body_verts) + 
news_present = layers.TimeDistributed(self.newsencoder)(pred_title_body_verts) + news_present_one = self.newsencoder(pred_title_body_verts_one) + + preds = layers.Dot(axes=-1)([news_present, user_present]) + preds = layers.Activation(activation="softmax")(preds) + + pred_one = layers.Dot(axes=-1)([news_present_one, user_present]) + pred_one = layers.Activation(activation="sigmoid")(pred_one) + + model = keras.Model( + [ + his_input_title, + his_input_body, + his_input_vert, + his_input_subvert, + pred_input_title, + pred_input_body, + pred_input_vert, + pred_input_subvert, + ], + preds, + ) + + scorer = keras.Model( + [ + his_input_title, + his_input_body, + his_input_vert, + his_input_subvert, + pred_input_title_one, + pred_input_body_one, + pred_input_vert_one, + pred_input_subvert_one, + ], + pred_one, + ) + + return model, scorer
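A usage sketch for NAML (all paths are placeholders). Because the model consumes title, body, vert and subvert features, it pairs with the all-feature MIND iterator and the "naml" data format rather than the title-only iterator used by NRMS, LSTUR and NPA.

    from recommenders.models.newsrec.newsrec_utils import prepare_hparams
    from recommenders.models.newsrec.models.naml import NAMLModel
    from recommenders.models.newsrec.io.mind_all_iterator import MINDAllIterator

    hparams = prepare_hparams(
        "naml.yaml",
        wordEmb_file="embedding_all.npy",
        wordDict_file="word_dict_all.pkl",
        userDict_file="uid2index.pkl",
        vertDict_file="vert_dict.pkl",
        subvertDict_file="subvert_dict.pkl",
        batch_size=32,
        epochs=5,
    )
    model = NAMLModel(hparams, MINDAllIterator, seed=42)
    model.fit("train/news.tsv", "train/behaviors.tsv",
              "valid/news.tsv", "valid/behaviors.tsv")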
+
+ +
+ + + + + + +
+ +
+
+
+ +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/_modules/recommenders/models/newsrec/models/npa.html b/_modules/recommenders/models/newsrec/models/npa.html new file mode 100644 index 0000000000..c440f32bb9 --- /dev/null +++ b/_modules/recommenders/models/newsrec/models/npa.html @@ -0,0 +1,608 @@ + + + + + + + + + + + recommenders.models.newsrec.models.npa — Recommenders documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ + + +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

+ +
+
+ +
+
+
+ + + + +
+ +

Source code for recommenders.models.newsrec.models.npa

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import tensorflow.keras as keras
+from tensorflow.keras import layers
+
+
+from recommenders.models.newsrec.models.base_model import BaseModel
+from recommenders.models.newsrec.models.layers import PersonalizedAttentivePooling
+
+__all__ = ["NPAModel"]
+
+
+
[docs]class NPAModel(BaseModel): +    """NPA model (Neural News Recommendation with Personalized Attention) + +    Chuhan Wu, Fangzhao Wu, Mingxiao An, Jianqiang Huang, Yongfeng Huang and Xing Xie: +    NPA: Neural News Recommendation with Personalized Attention, KDD 2019, ADS track. + +    Attributes: +        word2vec_embedding (numpy.ndarray): Pretrained word embedding matrix. +        hparam (object): Global hyper-parameters. +    """ + +    def __init__(self, hparams, iterator_creator, seed=None): +        """Initialization steps for NPA. +        Compared with BaseModel, NPA needs a word embedding matrix. +        After creating the word embedding matrix, BaseModel's __init__ method is called. + +        Args: +            hparams (object): Global hyper-parameters. Some key settings, such as filter_num, are there. +            iterator_creator (object): NPA data loader class for train, test and validation data. +            seed (int): Random seed. +        """ + +        self.word2vec_embedding = self._init_embedding(hparams.wordEmb_file) +        self.hparam = hparams + +        super().__init__(hparams, iterator_creator, seed=seed) + +    def _get_input_label_from_iter(self, batch_data): +        input_feat = [ +            batch_data["user_index_batch"], +            batch_data["clicked_title_batch"], +            batch_data["candidate_title_batch"], +        ] +        input_label = batch_data["labels"] +        return input_feat, input_label + +    def _build_graph(self): +        """Build NPA model and scorer. + +        Returns: +            object: a model used to train. +            object: a model used to evaluate and run inference. +        """ + +        model, scorer = self._build_npa() +        return model, scorer + +    def _build_userencoder(self, titleencoder, user_embedding_layer): +        """The main function to create user encoder of NPA. + +        Args: +            titleencoder (object): the news encoder of NPA. +            user_embedding_layer (object): the user embedding layer. + +        Return: +            object: the user encoder of NPA. +        """ +        hparams = self.hparams + +        his_input_title = keras.Input( +            shape=(hparams.his_size, hparams.title_size), dtype="int32" +        ) +        user_indexes = keras.Input(shape=(1,), dtype="int32") + +        nuser_id = layers.Reshape((1, 1))(user_indexes) +        repeat_uids = layers.Concatenate(axis=-2)([nuser_id] * hparams.his_size) +        his_title_uid = layers.Concatenate(axis=-1)([his_input_title, repeat_uids]) + +        click_title_presents = layers.TimeDistributed(titleencoder)(his_title_uid) + +        u_emb = layers.Reshape((hparams.user_emb_dim,))( +            user_embedding_layer(user_indexes) +        ) +        user_present = PersonalizedAttentivePooling( +            hparams.his_size, +            hparams.filter_num, +            hparams.attention_hidden_dim, +            seed=self.seed, +        )([click_title_presents, layers.Dense(hparams.attention_hidden_dim)(u_emb)]) + +        model = keras.Model( +            [his_input_title, user_indexes], user_present, name="user_encoder" +        ) +        return model + +    def _build_newsencoder(self, embedding_layer, user_embedding_layer): +        """The main function to create news encoder of NPA. + +        Args: +            embedding_layer (object): a word embedding layer. +            user_embedding_layer (object): the user embedding layer. + +        Return: +            object: the news encoder of NPA.
+ """ + hparams = self.hparams + sequence_title_uindex = keras.Input( + shape=(hparams.title_size + 1,), dtype="int32" + ) + + sequences_input_title = layers.Lambda(lambda x: x[:, : hparams.title_size])( + sequence_title_uindex + ) + user_index = layers.Lambda(lambda x: x[:, hparams.title_size :])( + sequence_title_uindex + ) + + u_emb = layers.Reshape((hparams.user_emb_dim,))( + user_embedding_layer(user_index) + ) + embedded_sequences_title = embedding_layer(sequences_input_title) + + y = layers.Dropout(hparams.dropout)(embedded_sequences_title) + y = layers.Conv1D( + hparams.filter_num, + hparams.window_size, + activation=hparams.cnn_activation, + padding="same", + bias_initializer=keras.initializers.Zeros(), + kernel_initializer=keras.initializers.glorot_uniform(seed=self.seed), + )(y) + y = layers.Dropout(hparams.dropout)(y) + + pred_title = PersonalizedAttentivePooling( + hparams.title_size, + hparams.filter_num, + hparams.attention_hidden_dim, + seed=self.seed, + )([y, layers.Dense(hparams.attention_hidden_dim)(u_emb)]) + + # pred_title = Reshape((1, feature_size))(pred_title) + model = keras.Model(sequence_title_uindex, pred_title, name="news_encoder") + return model + + def _build_npa(self): + """The main function to create NPA's logic. The core of NPA + is a user encoder and a news encoder. + + Returns: + object: a model used to train. + object: a model used to evaluate and predict. + """ + hparams = self.hparams + + his_input_title = keras.Input( + shape=(hparams.his_size, hparams.title_size), dtype="int32" + ) + pred_input_title = keras.Input( + shape=(hparams.npratio + 1, hparams.title_size), dtype="int32" + ) + pred_input_title_one = keras.Input( + shape=( + 1, + hparams.title_size, + ), + dtype="int32", + ) + pred_title_one_reshape = layers.Reshape((hparams.title_size,))( + pred_input_title_one + ) + user_indexes = keras.Input(shape=(1,), dtype="int32") + + nuser_index = layers.Reshape((1, 1))(user_indexes) + repeat_uindex = layers.Concatenate(axis=-2)( + [nuser_index] * (hparams.npratio + 1) + ) + pred_title_uindex = layers.Concatenate(axis=-1)( + [pred_input_title, repeat_uindex] + ) + pred_title_uindex_one = layers.Concatenate()( + [pred_title_one_reshape, user_indexes] + ) + + embedding_layer = layers.Embedding( + self.word2vec_embedding.shape[0], + hparams.word_emb_dim, + weights=[self.word2vec_embedding], + trainable=True, + ) + + user_embedding_layer = layers.Embedding( + len(self.train_iterator.uid2index), + hparams.user_emb_dim, + trainable=True, + embeddings_initializer="zeros", + ) + + titleencoder = self._build_newsencoder(embedding_layer, user_embedding_layer) + userencoder = self._build_userencoder(titleencoder, user_embedding_layer) + newsencoder = titleencoder + + user_present = userencoder([his_input_title, user_indexes]) + + news_present = layers.TimeDistributed(newsencoder)(pred_title_uindex) + news_present_one = newsencoder(pred_title_uindex_one) + + preds = layers.Dot(axes=-1)([news_present, user_present]) + preds = layers.Activation(activation="softmax")(preds) + + pred_one = layers.Dot(axes=-1)([news_present_one, user_present]) + pred_one = layers.Activation(activation="sigmoid")(pred_one) + + model = keras.Model([user_indexes, his_input_title, pred_input_title], preds) + scorer = keras.Model( + [user_indexes, his_input_title, pred_input_title_one], pred_one + ) + + return model, scorer
+
+ +
+ + + + + + +
+ +
+
+
+ +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/_modules/recommenders/models/newsrec/models/nrms.html b/_modules/recommenders/models/newsrec/models/nrms.html new file mode 100644 index 0000000000..9022ac03e2 --- /dev/null +++ b/_modules/recommenders/models/newsrec/models/nrms.html @@ -0,0 +1,582 @@ + + + + + + + + + + + recommenders.models.newsrec.models.nrms — Recommenders documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ + + +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

+ +
+
+ +
+
+
+ + + + +
+ +

Source code for recommenders.models.newsrec.models.nrms

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import tensorflow.keras as keras
+from tensorflow.keras import layers
+
+
+from recommenders.models.newsrec.models.base_model import BaseModel
+from recommenders.models.newsrec.models.layers import AttLayer2, SelfAttention
+
+__all__ = ["NRMSModel"]
+
+
+
[docs]class NRMSModel(BaseModel): +    """NRMS model (Neural News Recommendation with Multi-Head Self-Attention) + +    Chuhan Wu, Fangzhao Wu, Suyu Ge, Tao Qi, Yongfeng Huang, and Xing Xie, "Neural News +    Recommendation with Multi-Head Self-Attention" in Proceedings of the 2019 Conference +    on Empirical Methods in Natural Language Processing and the 9th International Joint Conference +    on Natural Language Processing (EMNLP-IJCNLP) + +    Attributes: +        word2vec_embedding (numpy.ndarray): Pretrained word embedding matrix. +        hparam (object): Global hyper-parameters. +    """ + +    def __init__( +        self, +        hparams, +        iterator_creator, +        seed=None, +    ): +        """Initialization steps for NRMS. +        Compared with BaseModel, NRMS needs a word embedding matrix. +        After creating the word embedding matrix, BaseModel's __init__ method is called. + +        Args: +            hparams (object): Global hyper-parameters. Some key settings, such as head_num and head_dim, are there. +            iterator_creator (object): NRMS data loader class for train, test and validation data. +            seed (int): Random seed. +        """ +        self.word2vec_embedding = self._init_embedding(hparams.wordEmb_file) + +        super().__init__( +            hparams, +            iterator_creator, +            seed=seed, +        ) + +    def _get_input_label_from_iter(self, batch_data): +        """Get the input and labels for training from the iterator. + +        Args: +            batch_data: input batch data from iterator + +        Returns: +            list: input feature fed into model (clicked_title_batch & candidate_title_batch) +            numpy.ndarray: labels +        """ +        input_feat = [ +            batch_data["clicked_title_batch"], +            batch_data["candidate_title_batch"], +        ] +        input_label = batch_data["labels"] +        return input_feat, input_label + +    def _get_user_feature_from_iter(self, batch_data): +        """get input of user encoder +        Args: +            batch_data: input batch data from user iterator + +        Returns: +            numpy.ndarray: input user feature (clicked title batch) +        """ +        return batch_data["clicked_title_batch"] + +    def _get_news_feature_from_iter(self, batch_data): +        """get input of news encoder +        Args: +            batch_data: input batch data from news iterator + +        Returns: +            numpy.ndarray: input news feature (candidate title batch) +        """ +        return batch_data["candidate_title_batch"] + +    def _build_graph(self): +        """Build NRMS model and scorer. + +        Returns: +            object: a model used to train. +            object: a model used to evaluate and run inference. +        """ +        model, scorer = self._build_nrms() +        return model, scorer + +    def _build_userencoder(self, titleencoder): +        """The main function to create user encoder of NRMS. + +        Args: +            titleencoder (object): the news encoder of NRMS. + +        Return: +            object: the user encoder of NRMS. +        """ +        hparams = self.hparams +        his_input_title = keras.Input( +            shape=(hparams.his_size, hparams.title_size), dtype="int32" +        ) + +        click_title_presents = layers.TimeDistributed(titleencoder)(his_input_title) +        y = SelfAttention(hparams.head_num, hparams.head_dim, seed=self.seed)( +            [click_title_presents] * 3 +        ) +        user_present = AttLayer2(hparams.attention_hidden_dim, seed=self.seed)(y) + +        model = keras.Model(his_input_title, user_present, name="user_encoder") +        return model + +    def _build_newsencoder(self, embedding_layer): +        """The main function to create news encoder of NRMS. + +        Args: +            embedding_layer (object): a word embedding layer. + +        Return: +            object: the news encoder of NRMS.
+ """ + hparams = self.hparams + sequences_input_title = keras.Input(shape=(hparams.title_size,), dtype="int32") + + embedded_sequences_title = embedding_layer(sequences_input_title) + + y = layers.Dropout(hparams.dropout)(embedded_sequences_title) + y = SelfAttention(hparams.head_num, hparams.head_dim, seed=self.seed)([y, y, y]) + y = layers.Dropout(hparams.dropout)(y) + pred_title = AttLayer2(hparams.attention_hidden_dim, seed=self.seed)(y) + + model = keras.Model(sequences_input_title, pred_title, name="news_encoder") + return model + + def _build_nrms(self): + """The main function to create NRMS's logic. The core of NRMS + is a user encoder and a news encoder. + + Returns: + object: a model used to train. + object: a model used to evaluate and inference. + """ + hparams = self.hparams + + his_input_title = keras.Input( + shape=(hparams.his_size, hparams.title_size), dtype="int32" + ) + pred_input_title = keras.Input( + shape=(hparams.npratio + 1, hparams.title_size), dtype="int32" + ) + pred_input_title_one = keras.Input( + shape=( + 1, + hparams.title_size, + ), + dtype="int32", + ) + pred_title_one_reshape = layers.Reshape((hparams.title_size,))( + pred_input_title_one + ) + + embedding_layer = layers.Embedding( + self.word2vec_embedding.shape[0], + hparams.word_emb_dim, + weights=[self.word2vec_embedding], + trainable=True, + ) + + titleencoder = self._build_newsencoder(embedding_layer) + self.userencoder = self._build_userencoder(titleencoder) + self.newsencoder = titleencoder + + user_present = self.userencoder(his_input_title) + news_present = layers.TimeDistributed(self.newsencoder)(pred_input_title) + news_present_one = self.newsencoder(pred_title_one_reshape) + + preds = layers.Dot(axes=-1)([news_present, user_present]) + preds = layers.Activation(activation="softmax")(preds) + + pred_one = layers.Dot(axes=-1)([news_present_one, user_present]) + pred_one = layers.Activation(activation="sigmoid")(pred_one) + + model = keras.Model([his_input_title, pred_input_title], preds) + scorer = keras.Model([his_input_title, pred_input_title_one], pred_one) + + return model, scorer
+
+ +
+ + + + + + +
+ +
+
+
+ +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/_modules/recommenders/models/newsrec/newsrec_utils.html b/_modules/recommenders/models/newsrec/newsrec_utils.html new file mode 100644 index 0000000000..7afef380ff --- /dev/null +++ b/_modules/recommenders/models/newsrec/newsrec_utils.html @@ -0,0 +1,722 @@ + + + + + + + + + + + recommenders.models.newsrec.newsrec_utils — Recommenders documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ + + +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

+ +
+
+ +
+
+
+ + + + +
+ +

Source code for recommenders.models.newsrec.newsrec_utils

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+
+from recommenders.models.deeprec.deeprec_utils import (
+    flat_config,
+    HParams,
+    load_yaml,
+)
+import random
+import re
+
+
+
[docs]def check_type(config): +    """Check that the config parameters are the correct type + +    Args: +        config (dict): Configuration dictionary. + +    Raises: +        TypeError: If the parameters are not the correct type. +    """ + +    int_parameters = [ +        "word_size", +        "his_size", +        "title_size", +        "body_size", +        "npratio", +        "word_emb_dim", +        "attention_hidden_dim", +        "epochs", +        "batch_size", +        "show_step", +        "save_epoch", +        "head_num", +        "head_dim", +        "user_num", +        "filter_num", +        "window_size", +        "gru_unit", +        "user_emb_dim", +        "vert_emb_dim", +        "subvert_emb_dim", +    ] +    for param in int_parameters: +        if param in config and not isinstance(config[param], int): +            raise TypeError("Parameter {0} must be int".format(param)) + +    float_parameters = ["learning_rate", "dropout"] +    for param in float_parameters: +        if param in config and not isinstance(config[param], float): +            raise TypeError("Parameter {0} must be float".format(param)) + +    str_parameters = [ +        "wordEmb_file", +        "wordDict_file", +        "userDict_file", +        "vertDict_file", +        "subvertDict_file", +        "method", +        "loss", +        "optimizer", +        "cnn_activation", +        "dense_activation", +        "type", +    ] +    for param in str_parameters: +        if param in config and not isinstance(config[param], str): +            raise TypeError("Parameter {0} must be str".format(param)) + +    list_parameters = ["layer_sizes", "activation"] +    for param in list_parameters: +        if param in config and not isinstance(config[param], list): +            raise TypeError("Parameter {0} must be list".format(param)) + +    bool_parameters = ["support_quick_scoring"] +    for param in bool_parameters: +        if param in config and not isinstance(config[param], bool): +            raise TypeError("Parameter {0} must be bool".format(param))
+ + +
[docs]def check_nn_config(f_config): +    """Check neural networks configuration. + +    Args: +        f_config (dict): Neural network configuration. + +    Raises: +        ValueError: If the parameters are not correct. +    """ + +    if f_config["model_type"] in ["nrms", "NRMS"]: +        required_parameters = [ +            "title_size", +            "his_size", +            "wordEmb_file", +            "wordDict_file", +            "userDict_file", +            "npratio", +            "data_format", +            "word_emb_dim", +            # nrms +            "head_num", +            "head_dim", +            # attention +            "attention_hidden_dim", +            "loss", +            "dropout", +        ] + +    elif f_config["model_type"] in ["naml", "NAML"]: +        required_parameters = [ +            "title_size", +            "body_size", +            "his_size", +            "wordEmb_file", +            "subvertDict_file", +            "vertDict_file", +            "wordDict_file", +            "userDict_file", +            "npratio", +            "data_format", +            "word_emb_dim", +            "vert_emb_dim", +            "subvert_emb_dim", +            # naml +            "filter_num", +            "cnn_activation", +            "window_size", +            "dense_activation", +            # attention +            "attention_hidden_dim", +            "loss", +            "dropout", +        ] +    elif f_config["model_type"] in ["lstur", "LSTUR"]: +        required_parameters = [ +            "title_size", +            "his_size", +            "wordEmb_file", +            "wordDict_file", +            "userDict_file", +            "npratio", +            "data_format", +            "word_emb_dim", +            # lstur +            "gru_unit", +            "type", +            "filter_num", +            "cnn_activation", +            "window_size", +            # attention +            "attention_hidden_dim", +            "loss", +            "dropout", +        ] +    elif f_config["model_type"] in ["npa", "NPA"]: +        required_parameters = [ +            "title_size", +            "his_size", +            "wordEmb_file", +            "wordDict_file", +            "userDict_file", +            "npratio", +            "data_format", +            "word_emb_dim", +            # npa +            "user_emb_dim", +            "filter_num", +            "cnn_activation", +            "window_size", +            # attention +            "attention_hidden_dim", +            "loss", +            "dropout", +        ] +    else: +        required_parameters = [] + +    # check required parameters +    for param in required_parameters: +        if param not in f_config: +            raise ValueError("Parameter {0} must be set".format(param)) + +    if f_config["model_type"] in ["nrms", "NRMS", "lstur", "LSTUR"]: +        if f_config["data_format"] != "news": +            raise ValueError( +                "For the nrms and lstur models, data format must be 'news', but yours is set to {0}".format( +                    f_config["data_format"] +                ) +            ) +    elif f_config["model_type"] in ["naml", "NAML"]: +        if f_config["data_format"] != "naml": +            raise ValueError( +                "For the naml model, data format must be 'naml', but yours is set to {0}".format( +                    f_config["data_format"] +                ) +            ) + +    check_type(f_config)
+ + +
[docs]def create_hparams(flags): + """Create the model hyperparameters. + + Args: + flags (dict): Dictionary with the model requirements. + + Returns: + HParams: Hyperparameter object. + """ + init_dict = { + # data + "support_quick_scoring": False, + # models + "dropout": 0.0, + "attention_hidden_dim": 200, + # nrms + "head_num": 4, + "head_dim": 100, + # naml + "filter_num": 200, + "window_size": 3, + "vert_emb_dim": 100, + "subvert_emb_dim": 100, + # lstur + "gru_unit": 400, + "type": "ini", + # npa + "user_emb_dim": 50, + # train + "learning_rate": 0.001, + "optimizer": "adam", + "epochs": 10, + "batch_size": 1, + # show info + "show_step": 1, + } + init_dict.update(flags) + return HParams(init_dict)
+ + +
[docs]def prepare_hparams(yaml_file=None, **kwargs): + """Prepare the model hyperparameters and check that all have the correct value. + + Args: + yaml_file (str): YAML file as configuration. + + Returns: + HParams: Hyperparameter object. + """ + if yaml_file is not None: + config = load_yaml(yaml_file) + config = flat_config(config) + else: + config = {} + + config.update(kwargs) + + check_nn_config(config) + return create_hparams(config)
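A typical call, sketched with placeholder paths for the yaml and pickle files shipped with the MIND utils archive; defaults come from create_hparams, and keyword arguments override both the defaults and the yaml values.

    hparams = prepare_hparams(
        yaml_file="nrms.yaml",
        wordEmb_file="embedding.npy",
        wordDict_file="word_dict.pkl",
        userDict_file="uid2index.pkl",
        epochs=5,
        show_step=10,
    )
    print(hparams.head_num)  # 4, a default filled in by create_hparams
    print(hparams.epochs)    # 5, the keyword override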
+ + +
[docs]def word_tokenize(sent): + """Split sentence into word list using regex. + Args: + sent (str): Input sentence + + Return: + list: word list + """ + pat = re.compile(r"[\w]+|[.,!?;|]") + if isinstance(sent, str): + return pat.findall(sent.lower()) + else: + return []
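For concreteness: the tokenizer lowercases its input and keeps the listed punctuation as separate tokens, while characters outside the pattern (such as ':') are dropped.

    >>> word_tokenize("Breaking news: markets rally, again!")
    ['breaking', 'news', 'markets', 'rally', ',', 'again', '!']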
+ + +
[docs]def newsample(news, ratio): +    """Sample ratio samples from news list. +    If the length of news is less than ratio, pad with zeros. + +    Args: +        news (list): input news list +        ratio (int): sample number + +    Returns: +        list: output of sample list. +    """ +    if ratio > len(news): +        return news + [0] * (ratio - len(news)) +    else: +        return random.sample(news, ratio)
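Two illustrative calls (the exact second result depends on the random state):

    >>> newsample([101, 205, 309], 5)  # fewer candidates than ratio: zero-padded
    [101, 205, 309, 0, 0]
    >>> newsample([101, 205, 309], 2)  # sampled without replacement
    [309, 101]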
+ + +
[docs]def get_mind_data_set(type): +    """Get the MIND dataset download address. + +    Args: +        type (str): type of mind dataset, must be in ['large', 'small', 'demo'] + +    Returns: +        tuple: data URL and the train, validation and utils dataset names +    """ +    assert type in ["large", "small", "demo"] + +    if type == "large": +        return ( +            "https://mind201910small.blob.core.windows.net/release/", +            "MINDlarge_train.zip", +            "MINDlarge_dev.zip", +            "MINDlarge_utils.zip", +        ) + +    elif type == "small": +        return ( +            "https://mind201910small.blob.core.windows.net/release/", +            "MINDsmall_train.zip", +            "MINDsmall_dev.zip", +            "MINDsmall_utils.zip", +        ) + +    elif type == "demo": +        return ( +            "https://recodatasets.z20.web.core.windows.net/newsrec/", +            "MINDdemo_train.zip", +            "MINDdemo_dev.zip", +            "MINDdemo_utils.zip", +        )
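A sketch of the usual download flow; data_path is a placeholder folder, and download_deeprec_resources (from recommenders.models.deeprec.deeprec_utils) fetches and unzips each archive.

    import os
    from recommenders.models.deeprec.deeprec_utils import download_deeprec_resources

    mind_url, mind_train, mind_dev, mind_utils = get_mind_data_set("demo")
    data_path = "./mind-demo"
    download_deeprec_resources(mind_url, os.path.join(data_path, "train"), mind_train)
    download_deeprec_resources(mind_url, os.path.join(data_path, "valid"), mind_dev)
    download_deeprec_resources(mind_url, os.path.join(data_path, "utils"), mind_utils)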
+
+ +
+ + + + + + +
+ +
+
+
+ +
+ + + + +
+
+ + +
+ + +
+
+
+ + + + + +
+
+ + \ No newline at end of file diff --git a/_modules/recommenders/models/rbm/rbm.html b/_modules/recommenders/models/rbm/rbm.html new file mode 100644 index 0000000000..faf316de1c --- /dev/null +++ b/_modules/recommenders/models/rbm/rbm.html @@ -0,0 +1,1125 @@ + + + + + + + + + + + recommenders.models.rbm.rbm — Recommenders documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + + + + + + + + +
+
+
+
+
+ + + +
+
+ + + +
+ + + +
+ +
+
+ +
+
+ +
+ +
+ +
+ + +
+ +
+ +
+ + + + + + + + + + + + + + + + + + + +
+ +
+ +
+
+ + + +
+

+ +
+
+ +
+
+
+ + + + +
+ +

Source code for recommenders.models.rbm.rbm

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import numpy as np
+import tensorflow as tf
+import logging
+import os
+from pathlib import Path
+
+tf.compat.v1.disable_eager_execution()
+log = logging.getLogger(__name__)
+
+
+
[docs]class RBM: +    """Restricted Boltzmann Machine""" + +    def __init__( +        self, +        possible_ratings, +        visible_units, +        hidden_units=500, +        keep_prob=0.7, +        init_stdv=0.1, +        learning_rate=0.004, +        minibatch_size=100, +        training_epoch=20, +        display_epoch=10, +        sampling_protocol=[50, 70, 80, 90, 100], +        debug=False, +        with_metrics=False, +        seed=42, +    ): +        """Implementation of a multinomial Restricted Boltzmann Machine for collaborative filtering +        in numpy/pandas/tensorflow + +        Based on the article by Ruslan Salakhutdinov, Andriy Mnih and Geoffrey Hinton +        https://www.cs.toronto.edu/~rsalakhu/papers/rbmcf.pdf + +        In this implementation we use multinomial units instead of the one-hot-encoded units used in +        the paper. This means that the weights are rank 2 (matrices) instead of rank 3 tensors. + +        Basic mechanics: + +        1) A computational graph is created when the RBM class is instantiated. +        For an item based recommender this consists of: +        visible units: The number n_visible of visible units equals the number of items +        hidden units : hyperparameter to fix during training + +        2) Gibbs Sampling: + +        2.1) for each training epoch, the visible units are first clamped on the data + +        2.2) The activation probability of the hidden units, given a linear combination of +        the visibles, is evaluated P(h=1|phi_v). The latter is then used to sample the +        value of the hidden units. + +        2.3) The probability P(v=l|phi_h) is evaluated, where l=1,..,r are the ratings (e.g. +        r=5 for the movielens dataset). In general, this is a multinomial distribution, +        from which we sample the value of v. + +        2.4) This step is repeated k times, where k increases as optimization converges. It is +        essential to fix to zero the original unrated items during the whole learning process. + +        3) Optimization: +        The free energy of the visible units given the hidden is evaluated at the beginning (F_0) +        and after k steps of Bernoulli sampling (F_k). The weights and biases are updated by +        minimizing the difference F_0 - F_k. + +        4) Inference: +        Once the joint probability distribution P(v,h) is learned, this is used to generate ratings +        for unrated items for all users +        """ + +        # RBM parameters +        self.n_hidden = hidden_units  # number of hidden units +        self.keep = keep_prob  # keep probability for dropout regularization + +        # standard deviation used to initialize the weights matrices +        self.stdv = init_stdv + +        # learning rate used in the update method of the optimizer +        self.learning_rate = learning_rate + +        # size of the minibatch used in the random minibatches training; setting to 1 corresponds to +        # stochastic gradient descent, and it is considerably slower. Good performance is achieved +        # for a size of ~100. +        self.minibatch = minibatch_size +        self.epochs = training_epoch + 1  # number of epochs used to train the model + +        # number of epochs to show the rmse error during training +        self.display_epoch = display_epoch + +        # protocol to increase Gibbs sampling's step. Array containing the +        # percentage of the total training epoch when the step increases by 1 +        self.sampling_protocol = sampling_protocol + +        # if true, functions print their control parameters and/or outputs +        self.debug = debug + +        # if true, compute rmse and accuracy during training +        self.with_metrics = with_metrics + +        # Seed +        self.seed = seed +        np.random.seed(self.seed) +        tf.compat.v1.set_random_seed(self.seed) + +        self.n_visible = visible_units  # number of items + +        tf.compat.v1.reset_default_graph() + +        # ----------------------Initializers------------------------------------- + +        # create a sorted list of all the unique ratings (of float type) +        self.possible_ratings = possible_ratings + +        # create a lookup table to map integer indices to float ratings +        self.ratings_lookup_table = tf.lookup.StaticHashTable( +            tf.lookup.KeyValueTensorInitializer( +                tf.constant(list(range(len(self.possible_ratings))), dtype=tf.int32), +                tf.constant(list(self.possible_ratings), dtype=tf.float32), +            ), +            default_value=0, +        ) + +        self.generate_graph() +        self.init_metrics() +        self.init_gpu() +        init_graph = tf.compat.v1.global_variables_initializer() + +        # Start TF training session on default graph +        self.sess = tf.compat.v1.Session(config=self.config_gpu) +        self.sess.run(init_graph) +
[docs]    def binomial_sampling(self, pr): +        """Binomial sampling of hidden units activations using a rejection method. + +        Basic mechanics: + +        1) Extract a random number from a uniform distribution (g) and compare it with +        the unit's probability (pr) + +        2) Choose 0 if pr<g, 1 otherwise. It is convenient to implement this condition using +        the relu function. + +        Args: +            pr (tf.Tensor, float32): Input conditional probability. +            g (numpy.ndarray, float32): Uniform probability used for comparison. + +        Returns: +            tf.Tensor: Float32 tensor of sampled units. The value is 1 if pr>g and 0 otherwise. +        """ + +        # sample from a Bernoulli distribution with same dimensions as input distribution +        g = tf.convert_to_tensor( +            value=np.random.uniform(size=pr.shape[1]), dtype=tf.float32 +        ) + +        # sample the value of the hidden units +        h_sampled = tf.nn.relu(tf.sign(pr - g)) + +        return h_sampled
+ +
[docs]    def multinomial_sampling(self, pr): +        """Multinomial Sampling of ratings + +        Basic mechanics: +        For r classes, we sample r binomial distributions using the rejection method. This is possible +        since each class is statistically independent from the other. Note that this is the same method +        used in numpy's random.multinomial() function. + +        1) extract a size r array of random numbers from a uniform distribution (g). As pr is normalized, +        we need to normalize g as well. + +        2) For each user and item, compare pr with the reference distribution. Note that the latter needs +        to be the same for ALL the user/item pairs in the dataset, as by assumptions they are sampled +        from a common distribution. + +        Args: +            pr (tf.Tensor, float32): A distribution of shape (m, n, r), where m is the number of examples, n the number +            of features and r the number of classes. pr needs to be normalized, i.e. sum_k p(k) = 1 for all m, at fixed n. +            f (tf.Tensor, float32): Normalized, uniform probability used for comparison. + +        Returns: +            tf.Tensor: An (m,n) float32 tensor of sampled ratings from 1 to r. +        """ +        g = np.random.uniform(size=pr.shape[2])  # sample from a uniform distribution +        f = tf.convert_to_tensor( +            value=g / g.sum(), dtype=tf.float32 +        )  # normalize and convert to tensor + +        samp = tf.nn.relu(tf.sign(pr - f))  # apply rejection method + +        # get integer index of the rating to be sampled +        v_argmax = tf.cast(tf.argmax(input=samp, axis=2), "int32") + +        # lookup the rating using integer index +        v_samp = tf.cast(self.ratings_lookup_table.lookup(v_argmax), "float32") + +        return v_samp
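A small numpy illustration (outside TF) of the rejection step used above, for a single unit with r = 3 rating classes:

    import numpy as np

    pr = np.array([0.2, 0.5, 0.3])           # normalized P(v = l | phi_h)
    g = np.random.uniform(size=3)
    f = g / g.sum()                          # shared, normalized reference distribution
    samp = np.maximum(np.sign(pr - f), 0.0)  # 1 where pr exceeds the reference
    rating_index = samp.argmax()             # index of the sampled rating class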
+ +
[docs] def multinomial_distribution(self, phi): + """Probability that unit v has value l given phi: P(v=l|phi) + + Args: + phi (tf.Tensor): linear combination of values of the previous layer + r (float): rating scale, corresponding to the number of classes + + Returns: + tf.Tensor: + - A tensor of shape (r, m, Nv): This needs to be reshaped as (m, Nv, r) in the last step to allow for faster sampling when used in the multinomial function. + + """ + + numerator = [ + tf.exp(tf.multiply(tf.constant(k, dtype="float32"), phi)) + for k in self.possible_ratings + ] + + denominator = tf.reduce_sum(input_tensor=numerator, axis=0) + + prob = tf.compat.v1.div(numerator, denominator) + + return tf.transpose(a=prob, perm=[1, 2, 0])
+ +
[docs] def free_energy(self, x): + """Free energy of the visible units given the hidden units. Since the sum is over the hidden units' + states, the functional form of the visible units Free energy is the same as the one for the binary model. + + Args: + x (tf.Tensor): This can be either the sampled value of the visible units (v_k) or the input data + + Returns: + tf.Tensor: Free energy of the model. + """ + + bias = -tf.reduce_sum(input_tensor=tf.matmul(x, tf.transpose(a=self.bv))) + + phi_x = tf.matmul(x, self.w) + self.bh + f = -tf.reduce_sum(input_tensor=tf.nn.softplus(phi_x)) + + F = bias + f # free energy density per training example + + return F
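Stated as a formula (a restatement of the code above; the quantity is summed over the examples in the batch):

    F(v) = -\sum_i b^v_i v_i - \sum_j \mathrm{softplus}\Big(\sum_i v_i w_{ij} + b^h_j\Big)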
+ +
[docs] def placeholder(self): + """Initialize the placeholders for the visible units""" + self.vu = tf.compat.v1.placeholder( + shape=[None, self.n_visible], dtype="float32" + )
+ +
[docs]    def init_parameters(self): +        """Initialize the parameters of the model. + +        This is a single layer model with two biases. So we have a rectangular matrix w_{ij} and +        two bias vectors to initialize. + +        Args: +            n_visible (int): number of visible units (input layer) +            n_hidden (int): number of hidden units (latent variables of the model) + +        Returns: +            tf.Tensor, tf.Tensor, tf.Tensor: +            - `w` of size (n_visible, n_hidden): correlation matrix initialized by sampling from a normal distribution with zero mean and given variance init_stdv. +            - `bv` of size (1, n_visible): visible units' bias, initialized to zero. +            - `bh` of size (1, n_hidden): hidden units' bias, initialized to zero. +        """ +        with tf.compat.v1.variable_scope("Network_parameters"): + +            self.w = tf.compat.v1.get_variable( +                "weight", +                [self.n_visible, self.n_hidden], +                initializer=tf.compat.v1.random_normal_initializer( +                    stddev=self.stdv, seed=self.seed +                ), +                dtype="float32", +            ) + +            self.bv = tf.compat.v1.get_variable( +                "v_bias", +                [1, self.n_visible], +                initializer=tf.compat.v1.zeros_initializer(), +                dtype="float32", +            ) + +            self.bh = tf.compat.v1.get_variable( +                "h_bias", +                [1, self.n_hidden], +                initializer=tf.compat.v1.zeros_initializer(), +                dtype="float32", +            )
+ +
[docs] def sample_hidden_units(self, vv): + """Sampling: In RBM we use Contrastive divergence to sample the parameter space. In order to do that we need + to initialize the two conditional probabilities: + + P(h|phi_v) --> returns the probability that the i-th hidden unit is active + + P(v|phi_h) --> returns the probability that the i-th visible unit is active + + Sample hidden units given the visibles. This can be thought of as a Forward pass step in a FFN + + Args: + vv (tf.Tensor, float32): visible units + + Returns: + tf.Tensor, tf.Tensor: + - `phv`: The activation probability of the hidden unit. + - `h_`: The sampled value of the hidden unit from a Bernoulli distributions having success probability `phv`. + """ + + with tf.compat.v1.name_scope("sample_hidden_units"): + + phi_v = tf.matmul(vv, self.w) + self.bh # create a linear combination + phv = tf.nn.sigmoid(phi_v) # conditional probability of h given v + phv_reg = tf.nn.dropout(phv, 1 - (self.keep)) + + # Sampling + h_ = self.binomial_sampling( + phv_reg + ) # obtain the value of the hidden units via Bernoulli sampling + + return phv, h_
+ +
[docs]    def sample_visible_units(self, h): +        """Sample the visible units given the hiddens. This can be thought of as a Backward pass in a FFN +        (negative phase). Each visible unit can take values in [1,rating], while the zero is reserved +        for missing data; as such the value of the visible unit is sampled from a multinomial distribution. + +        Basic mechanics: + +        1) For every training example we first sample Nv Multinomial distributions. The result is of the +        form [0,1,0,0,0,...,0] where the index of the 1 element corresponds to the rth rating. The index +        is extracted using the argmax function and we need to add 1 at the end since array indices start +        from 0. + +        2) Select only those units that have been sampled. During the training phase it is important to not +        use the reconstructed inputs, so we need to enforce a zero value in the reconstructed ratings in +        the same position as the original input. + +        Args: +            h (tf.Tensor, float32): hidden units. + +        Returns: +            tf.Tensor, tf.Tensor: +            - `pvh`: The activation probability of the visible unit given the hidden. +            - `v_`: The sampled value of the visible unit from a Multinomial distribution having success probability `pvh`. +        """ + +        with tf.compat.v1.name_scope("sample_visible_units"): + +            phi_h = tf.matmul(h, tf.transpose(a=self.w)) + self.bv  # linear combination +            pvh = self.multinomial_distribution( +                phi_h +            )  # conditional probability of v given h + +            # Sampling +            v_tmp = self.multinomial_sampling( +                pvh +            )  # sample the value of the visible units + +            mask = tf.equal(self.v, 0)  # selects the inactive units in the input vector + +            v_ = tf.compat.v1.where( +                mask, x=self.v, y=v_tmp +            )  # enforce inactive units in the reconstructed vector + +            return pvh, v_
+ +
[docs] def gibbs_sampling(self): + """Gibbs sampling: Determines an estimate of the model configuration via sampling. In the binary + RBM we need to impose that unseen movies stay as such, i.e. the sampling phase should not modify + the elements where v=0. + + Args: + k (scalar, integer): iterator. Number of sampling steps. + v (tf.Tensor, float32): visible units. + + Returns: + tf.Tensor, tf.Tensor: + - `h_k`: The sampled value of the hidden unit at step k, float32. + - `v_k`: The sampled value of the visible unit at step k, float32. + """ + + with tf.compat.v1.name_scope("gibbs_sampling"): + + self.v_k = ( + self.v + ) # initialize the value of the visible units at step k=0 on the data + + if self.debug: + print("CD step", self.k) + + for i in range(self.k): # k_sampling + _, h_k = self.sample_hidden_units(self.v_k) + _, self.v_k = self.sample_visible_units(h_k)
+ +
[docs] def losses(self, vv): + """Calculate contrastive divergence, which is the difference between + the free energy clamped on the data (v) and the model Free energy (v_k). + + Args: + vv (tf.Tensor, float32): empirical input + + Returns: + obj: contrastive divergence + """ + + with tf.compat.v1.variable_scope("losses"): + obj = self.free_energy(vv) - self.free_energy(self.v_k) + + return obj
+ +
[docs] def gibbs_protocol(self, i): + """Gibbs protocol. + + Basic mechanics: + + If the current epoch i is in the interval specified in the training protocol, + the number of steps in Gibbs sampling (k) is incremented by one and gibbs_sampling is updated + accordingly. + + Args: + i (int): Current epoch in the loop + """ + + with tf.compat.v1.name_scope("gibbs_protocol"): + + epoch_percentage = ( + i / self.epochs + ) * 100 # current percentage of the total #epochs + + if epoch_percentage != 0: + if ( + epoch_percentage >= self.sampling_protocol[self.l] + and epoch_percentage <= self.sampling_protocol[self.l + 1] + ): + self.k += 1 + self.l += 1 # noqa: E741 ambiguous variable name 'l' + self.gibbs_sampling() + + if self.debug: + log.info("percentage of epochs covered so far %f2" % (epoch_percentage))
+ +
[docs] def data_pipeline(self): + """Define the data pipeline""" + + # placeholder for the batch_size + self.batch_size = tf.compat.v1.placeholder(tf.int64) + + # Create the data pipeline for faster training + self.dataset = tf.data.Dataset.from_tensor_slices(self.vu) + + self.dataset = self.dataset.shuffle( + buffer_size=50, reshuffle_each_iteration=True, seed=self.seed + ) # randomize the batch + + self.dataset = self.dataset.batch(batch_size=self.batch_size).repeat() + + # define iterator + self.iter = tf.compat.v1.data.make_initializable_iterator(self.dataset) + self.v = self.iter.get_next()
+ +
[docs] def init_metrics(self): + """Initialize metrics""" + + if self.with_metrics: # if true (default) returns evaluation metrics + self.rmse = tf.sqrt( + tf.compat.v1.losses.mean_squared_error( + self.v, self.v_k, weights=tf.where(self.v > 0, 1, 0) + ) + )
+ +
[docs] def generate_graph(self): + """Call the different RBM modules to generate the computational graph""" + + log.info("Creating the computational graph") + + self.placeholder() # create the visible units placeholder + self.data_pipeline() # data_pipeline + self.init_parameters() # initialize Network parameters + + # --------------Initialize protocol for Gibbs sampling------------------ + log.info("Initialize Gibbs protocol") + self.k = 1 # initialize the G_sampling step + # initialize epoch_sample index + self.l = 0 # noqa: E741 ambiguous variable name 'l' + self.gibbs_sampling() # returns the sampled value of the visible units + + # ---Instantiate loss function and optimizer---------------------------- + obj = self.losses(self.v) # objective function + + rate = ( + self.learning_rate / self.minibatch + ) # learning rate rescaled by the batch size + + self.opt = tf.compat.v1.train.AdamOptimizer(learning_rate=rate).minimize( + loss=obj + ) # Instantiate the optimizer
+ +
[docs] def init_gpu(self): + """Config GPU memory""" + + self.config_gpu = tf.compat.v1.ConfigProto( + log_device_placement=False, allow_soft_placement=True + ) + self.config_gpu.gpu_options.allow_growth = True # dynamic memory allocation
+ +
[docs] def init_training_session(self, xtr): + """Initialize the TF session on training data + + Args: + xtr (numpy.ndarray, int32): The user/affinity matrix for the train set. + """ + + self.sess.run( + self.iter.initializer, + feed_dict={self.vu: xtr, self.batch_size: self.minibatch}, + ) + + self.sess.run(tf.compat.v1.tables_initializer())
+ +
[docs]    def batch_training(self, num_minibatches): +        """Perform training over input minibatches. If `self.with_metrics` is False, +        no online metrics are evaluated. + +        Args: +            num_minibatches (scalar, int32): Number of training minibatches. + +        Returns: +            float: Training error per single epoch. If `self.with_metrics` is False, this is zero. +        """ + +        epoch_tr_err = 0  # initialize the training error for each epoch to zero + +        # minibatch loop +        for _ in range(num_minibatches): + +            if self.with_metrics: +                _, batch_err = self.sess.run([self.opt, self.rmse]) + +                # average rmse error per minibatch +                epoch_tr_err += batch_err / num_minibatches + +            else: +                _ = self.sess.run(self.opt) + +        return epoch_tr_err
+ +
[docs]    def fit(self, xtr): +        """Fit method + +        Training in generative models takes place in two steps: + +        1) Gibbs sampling +        2) Gradient evaluation and parameters update + +        This estimate is later used in the weight update step by minimizing the distance between the +        model and the empirical free energy. Note that while the unit's configuration space is sampled, +        the weights are determined via maximum likelihood (saddle point). + +        Main component of the algorithm; once instantiated, it generates the computational graph and performs +        model training + +        Args: +            xtr (numpy.ndarray, integers): the user/affinity matrix for the train set +        """ + +        # keep the position of the items in the train set so that they can be optionally excluded from recommendation +        self.seen_mask = np.not_equal(xtr, 0) + +        n_users = xtr.shape[0] +        num_minibatches = int(n_users / self.minibatch)  # number of minibatches + +        self.init_training_session(xtr) + +        rmse_train = []  # List to collect the metrics across epochs + +        # start loop over training epochs +        for i in range(self.epochs): + +            self.gibbs_protocol(i)  # Gibbs sampling update +            epoch_tr_err = self.batch_training(num_minibatches)  # model train + +            if self.with_metrics and i % self.display_epoch == 0: +                log.info("training epoch %i rmse %f" % (i, epoch_tr_err)) + +            rmse_train.append(epoch_tr_err)  # rmse training error per training epoch + +        self.rmse_train = rmse_train
+ +
[docs] def eval_out(self): + """Implement multinomial sampling from a trained model""" + + # Sampling + _, h = self.sample_hidden_units(self.vu) # sample h + + # sample v + phi_h = ( + tf.transpose(a=tf.matmul(self.w, tf.transpose(a=h))) + self.bv + ) # linear combination + pvh = self.multinomial_distribution( + phi_h + ) # conditional probability of v given h + + v = self.multinomial_sampling(pvh) # sample the value of the visible units + + return v, pvh
+ +
[docs]    def recommend_k_items(self, x, top_k=10, remove_seen=True): +        """Returns the top-k items ordered by a relevancy score. + +        Basic mechanics: + +        The method samples new ratings from the learned joint distribution, together with their +        probabilities. The input x must have the same number of columns as the one used for training +        the model (i.e. the same number of items) but it can have an arbitrary number of rows (users). + +        A recommendation score is evaluated by taking the element-wise product between the ratings and +        the associated probabilities. For example, we could have the following situation: + +        .. code-block:: python + +                    rating     probability     score +            item1     5           0.5            2.5 +            item2     4           0.8            3.2 + +        then item2 will be recommended. + +        Args: +            x (numpy.ndarray, int32): input user/affinity matrix. Note that this can be a single vector, i.e. the ratings +            of a single user. +            top_k (scalar, int32): the number of items to recommend. +            remove_seen (bool): if True, items already rated in the train set are removed from the recommendations. + +        Returns: +            numpy.ndarray, float: A sparse matrix containing only the top_k elements, ordered by their score. +        """ + +        # evaluate the ratings and the associated probabilities +        v_, pvh_ = self.eval_out() + +        # evaluate v_ and pvh_ on the input data +        vp, pvh = self.sess.run([v_, pvh_], feed_dict={self.vu: x}) +        # returns only the probabilities for the predicted ratings in vp +        pv = np.max(pvh, axis=2) + +        # evaluate the score +        score = np.multiply(vp, pv) +        # ----------------------Return the results------------------------------------ + +        log.info("Extracting top %i elements" % top_k) + +        if remove_seen: +            # if true, it removes items from the train set by setting them to zero +            vp[self.seen_mask] = 0 +            pv[self.seen_mask] = 0 +            score[self.seen_mask] = 0 + +        top_items = np.argpartition(-score, range(top_k), axis=1)[ +            :, :top_k +        ]  # get the top k items + +        score_c = score.copy()  # get a copy of the score matrix + +        score_c[ +            np.arange(score_c.shape[0])[:, None], top_items +        ] = 0  # set to zero the top_k elements + +        top_scores = score - score_c  # set to zeros all elements other than the top_k + +        return top_scores
+ +
[docs] def predict(self, x): + """Returns the inferred ratings. This method is similar to recommend_k_items() with the + exception that it returns all the inferred ratings. + + Basic mechanics: + + The method samples new ratings from the learned joint distribution, together with + their probabilities. The input x must have the same number of columns as the one used + for training the model, i.e. the same number of items, but it can have an arbitrary number + of rows (users). + + Args: + x (numpy.ndarray, int32): Input user/affinity matrix. Note that this can be a single vector, i.e. + the ratings of a single user. + + Returns: + numpy.ndarray, float: A matrix with the inferred ratings. + """ + + v_, _ = self.eval_out() # evaluate the ratings and the associated probabilities + vp = self.sess.run(v_, feed_dict={self.vu: x}) + + return vp
+ +
[docs] def save(self, file_path="./rbm_model.ckpt"): + """Save model parameters to `file_path` + + This function saves the current tensorflow session to a specified path. + + Args: + file_path (str): output file path for the RBM model checkpoint; + a new directory will be created if it does not exist. + """ + + f_path = Path(file_path) + dir_name, file_name = f_path.parent, f_path.name + + # create the directory if it does not exist + os.makedirs(dir_name, exist_ok=True) + + # save trained model + saver = tf.compat.v1.train.Saver() + saver.save(self.sess, os.path.join(dir_name, file_name))
+ +
[docs] def load(self, file_path="./rbm_model.ckpt"): + """Load model parameters for further use. + + This function loads a saved tensorflow session. + + Args: + file_path (str): file path for RBM model checkpoint + """ + + f_path = Path(file_path) + dir_name, file_name = f_path.parent, f_path.name + + # load pre-trained model + saver = tf.compat.v1.train.Saver() + saver.restore(self.sess, os.path.join(dir_name, file_name))
+
+ +
\ No newline at end of file
diff --git a/_modules/recommenders/models/rlrmc/RLRMCdataset.html b/_modules/recommenders/models/rlrmc/RLRMCdataset.html
new file mode 100644
index 0000000000..6e0d07ccd8
--- /dev/null
+++ b/_modules/recommenders/models/rlrmc/RLRMCdataset.html
@@ -0,0 +1,544 @@
+recommenders.models.rlrmc.RLRMCdataset — Recommenders documentation
Source code for recommenders.models.rlrmc.RLRMCdataset

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import numpy as np
+import pandas as pd
+from scipy.sparse import csr_matrix
+
+from recommenders.utils.constants import (
+    DEFAULT_ITEM_COL,
+    DEFAULT_USER_COL,
+    DEFAULT_RATING_COL,
+    DEFAULT_TIMESTAMP_COL,
+)
+
+
+
[docs]class RLRMCdataset(object): + """RLRMC dataset implementation. Creates sparse data structures for RLRMC algorithm.""" + + def __init__( + self, + train, + validation=None, + test=None, + mean_center=True, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_rating=DEFAULT_RATING_COL, + col_timestamp=DEFAULT_TIMESTAMP_COL, + # seed=42, + ): + """Initialize parameters. + + Args: + train (pandas.DataFrame): training data with at least columns (col_user, col_item, col_rating) + validation (pandas.DataFrame): validation data with at least columns (col_user, col_item, col_rating). validation can be None; if so, only the training data is processed + test (pandas.DataFrame): test data with at least columns (col_user, col_item, col_rating). test can be None + mean_center (bool): flag to mean center the ratings in train (and validation) data + col_user (str): user column name + col_item (str): item column name + col_rating (str): rating column name + col_timestamp (str): timestamp column name + """ + # initialize user and item index + self.user_idx = None + self.item_idx = None + + # get col name of user, item and rating + self.col_user = col_user + self.col_item = col_item + self.col_rating = col_rating + self.col_timestamp = col_timestamp + # set random seed + # random.seed(seed) + + # data preprocessing for training and validation data + self._data_processing(train, validation, test, mean_center) + + def _data_processing(self, train, validation=None, test=None, mean_center=True): + """Process the dataset to reindex userID and itemID + + Args: + train (pandas.DataFrame): training data with at least columns (col_user, col_item, col_rating) + validation (pandas.DataFrame): validation data with at least columns (col_user, col_item, col_rating). validation can be None; if so, only the training data is processed + test (pandas.DataFrame): test data with at least columns (col_user, col_item, col_rating). test can be None + mean_center (bool): flag to mean center the ratings in train (and validation) data + + Returns: + None. The reindexed, mean-centered data are stored on the object as sparse matrices.
+ + """ + # Data processing and reindexing code is adapted from https://github.com/Microsoft/Recommenders/blob/main/recommenders/models/ncf/dataset.py + # Concatenate validation and test data (if provided) for reindexing + df = train if validation is None else pd.concat([train, validation]) + df = df if test is None else pd.concat([df, test]) + + # Reindex user and item index + if self.user_idx is None: + # Map user id + user_idx = df[[self.col_user]].drop_duplicates().reindex() + user_idx[self.col_user + "_idx"] = np.arange(len(user_idx)) + self.n_users = len(user_idx) + self.user_idx = user_idx + + self.user2id = dict( + zip(user_idx[self.col_user], user_idx[self.col_user + "_idx"]) + ) + self.id2user = {self.user2id[k]: k for k in self.user2id} + + if self.item_idx is None: + # Map item id + item_idx = df[[self.col_item]].drop_duplicates() + item_idx[self.col_item + "_idx"] = np.arange(len(item_idx)) + self.n_items = len(item_idx) + self.item_idx = item_idx + + self.item2id = dict( + zip(item_idx[self.col_item], item_idx[self.col_item + "_idx"]) + ) + self.id2item = {self.item2id[k]: k for k in self.item2id} + + df_train = self._reindex(train) + + d = self.n_users # number of rows + T = self.n_items # number of columns + + rows_train = df_train["userID"].values + cols_train = df_train["itemID"].values + entries_omega = df_train["rating"].values + if mean_center: + train_mean = np.mean(entries_omega) + else: + train_mean = 0.0 + entries_train = entries_omega - train_mean + self.model_param = {"num_row": d, "num_col": T, "train_mean": train_mean} + + self.train = csr_matrix( + (entries_train.T.ravel(), (rows_train, cols_train)), shape=(d, T) + ) + + if validation is not None: + df_validation = self._reindex(validation) + rows_validation = df_validation["userID"].values + cols_validation = df_validation["itemID"].values + entries_validation = df_validation["rating"].values - train_mean + self.validation = csr_matrix( + (entries_validation.T.ravel(), (rows_validation, cols_validation)), + shape=(d, T), + ) + else: + self.validation = None + + def _reindex(self, df): + """Process dataset to reindex userID and itemID + + Args: + df (pandas.DataFrame): dataframe with at least columns (col_user, col_item, col_rating) + + Returns: + pandas.DataFrame: dataframe with reindexed user and item ids, or None if df is None. + + """ + + # Return None if no dataframe was provided + if df is None: + return None + + # Map user_idx and item_idx + df = pd.merge(df, self.user_idx, on=self.col_user, how="left") + df = pd.merge(df, self.item_idx, on=self.col_item, how="left") + + # Select relevant columns + df_reindex = df[ + [self.col_user + "_idx", self.col_item + "_idx", self.col_rating] + ] + df_reindex.columns = [self.col_user, self.col_item, self.col_rating] + + return df_reindex
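As a sketch of what _data_processing builds (toy data and the default column names, not taken from the source), the reindexed and optionally mean-centered ratings end up in a scipy csr_matrix:

import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix

# toy training data using the default column names
train = pd.DataFrame(
    {"userID": ["a", "a", "b"], "itemID": ["x", "y", "x"], "rating": [4.0, 2.0, 3.0]}
)

rows = np.array([0, 0, 1])  # users reindexed to 0..n_users-1
cols = np.array([0, 1, 0])  # items reindexed to 0..n_items-1
train_mean = train["rating"].mean()  # 3.0, subtracted when mean_center=True
entries = train["rating"].values - train_mean

matrix = csr_matrix((entries, (rows, cols)), shape=(2, 2))
# matrix.toarray() -> [[ 1., -1.], [ 0.,  0.]]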
+
+ +
\ No newline at end of file
diff --git a/_modules/recommenders/models/sar/sar_singlenode.html b/_modules/recommenders/models/sar/sar_singlenode.html
new file mode 100644
index 0000000000..48b46546e1
--- /dev/null
+++ b/_modules/recommenders/models/sar/sar_singlenode.html
@@ -0,0 +1,988 @@
+recommenders.models.sar.sar_singlenode — Recommenders documentation
Source code for recommenders.models.sar.sar_singlenode

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import numpy as np
+import pandas as pd
+import logging
+from scipy import sparse
+
+from recommenders.utils.python_utils import (
+    cosine_similarity,
+    inclusion_index,
+    jaccard,
+    lexicographers_mutual_information,
+    lift,
+    mutual_information,
+    exponential_decay,
+    get_top_k_scored_items,
+    rescale,
+)
+from recommenders.utils import constants
+
+
+SIM_COOCCUR = "cooccurrence"
+SIM_COSINE = "cosine"
+SIM_INCLUSION_INDEX = "inclusion index"
+SIM_JACCARD = "jaccard"
+SIM_LEXICOGRAPHERS_MUTUAL_INFORMATION = "lexicographers mutual information"
+SIM_LIFT = "lift"
+SIM_MUTUAL_INFORMATION = "mutual information"
+
+logger = logging.getLogger()
+
+
+
[docs]class SARSingleNode: + """Simple Algorithm for Recommendations (SAR) implementation + + SAR is a fast scalable adaptive algorithm for personalized recommendations based on user transaction history + and items description. The core idea behind SAR is to recommend items like those that a user already has + demonstrated an affinity to. It does this by 1) estimating the affinity of users for items, 2) estimating + similarity across items, and then 3) combining the estimates to generate a set of recommendations for a given user. + """ + + def __init__( + self, + col_user=constants.DEFAULT_USER_COL, + col_item=constants.DEFAULT_ITEM_COL, + col_rating=constants.DEFAULT_RATING_COL, + col_timestamp=constants.DEFAULT_TIMESTAMP_COL, + col_prediction=constants.DEFAULT_PREDICTION_COL, + similarity_type=SIM_JACCARD, + time_decay_coefficient=30, + time_now=None, + timedecay_formula=False, + threshold=1, + normalize=False, + ): + """Initialize model parameters + + Args: + col_user (str): user column name + col_item (str): item column name + col_rating (str): rating column name + col_timestamp (str): timestamp column name + col_prediction (str): prediction column name + similarity_type (str): ['cooccurrence', 'cosine', 'inclusion index', 'jaccard', + 'lexicographers mutual information', 'lift', 'mutual information'] option for + computing item-item similarity + time_decay_coefficient (float): number of days till ratings are decayed by 1/2 + time_now (int | None): current time for time decay calculation + timedecay_formula (bool): flag to apply time decay + threshold (int): item-item co-occurrences below this threshold will be removed + normalize (bool): option for normalizing predictions to scale of original ratings + """ + self.col_rating = col_rating + self.col_item = col_item + self.col_user = col_user + self.col_timestamp = col_timestamp + self.col_prediction = col_prediction + + available_similarity_types = [ + SIM_COOCCUR, + SIM_COSINE, + SIM_INCLUSION_INDEX, + SIM_JACCARD, + SIM_LIFT, + SIM_MUTUAL_INFORMATION, + SIM_LEXICOGRAPHERS_MUTUAL_INFORMATION, + ] + if similarity_type not in available_similarity_types: + raise ValueError( + 'Similarity type must be one of ["' + + '" | "'.join(available_similarity_types) + + '"]' + ) + self.similarity_type = similarity_type + self.time_decay_half_life = ( + time_decay_coefficient * 24 * 60 * 60 + ) # convert to seconds + self.time_decay_flag = timedecay_formula + self.time_now = time_now + self.threshold = threshold + self.user_affinity = None + self.item_similarity = None + self.item_frequencies = None + self.user_frequencies = None + + # threshold - items below this number get set to zero in co-occurrence counts + if self.threshold <= 0: + raise ValueError("Threshold cannot be < 1") + + # set flag to capture unity-rating user-affinity matrix for scaling scores + self.normalize = normalize + self.col_unity_rating = "_unity_rating" + self.unity_user_affinity = None + + # column for mapping user / item ids to internal indices + self.col_item_id = "_indexed_items" + self.col_user_id = "_indexed_users" + + # obtain all the users and items from both training and test data + self.n_users = None + self.n_items = None + + # The min and max of the rating scale, obtained from the training data. + self.rating_min = None + self.rating_max = None + + # mapping for item to matrix element + self.user2index = None + self.item2index = None + + # the opposite of the above maps - map array index to actual string ID + self.index2item = None + self.index2user = None + +
[docs] def compute_affinity_matrix(self, df, rating_col): + """Affinity matrix. + + The user-affinity matrix can be constructed by treating the users and items as + indices in a sparse matrix, and the events as the data. Here, we're treating + the ratings as the event weights. We convert between different sparse-matrix + formats to de-duplicate user-item pairs, otherwise they will get added up. + + Args: + df (pandas.DataFrame): Indexed df of users and items + rating_col (str): Name of column to use for ratings + + Returns: + sparse.csr: Affinity matrix in Compressed Sparse Row (CSR) format. + """ + + return sparse.coo_matrix( + (df[rating_col], (df[self.col_user_id], df[self.col_item_id])), + shape=(self.n_users, self.n_items), + ).tocsr()
+ +
[docs] def compute_time_decay(self, df, decay_column): + """Compute time decay on provided column. + + Args: + df (pandas.DataFrame): DataFrame of users and items + decay_column (str): column to decay + + Returns: + pandas.DataFrame: with column decayed + """ + + # if time_now is None use the latest time + if self.time_now is None: + self.time_now = df[self.col_timestamp].max() + + # apply time decay to each rating + df[decay_column] *= exponential_decay( + value=df[self.col_timestamp], + max_val=self.time_now, + half_life=self.time_decay_half_life, + ) + + # group time decayed ratings by user-item and take the sum as the user-item affinity + return df.groupby([self.col_user, self.col_item]).sum().reset_index()
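Assuming exponential_decay implements standard half-life decay (an assumption; only its call signature is visible here), the decay factor applied to each rating behaves as in this sketch:

import numpy as np

half_life = 30 * 24 * 60 * 60  # default time_decay_coefficient of 30 days, in seconds
time_now = 1_000_000_000
timestamps = np.array([time_now, time_now - half_life, time_now - 2 * half_life])

decay = np.minimum(1.0, np.power(0.5, (time_now - timestamps) / half_life))
# -> [1.0, 0.5, 0.25]: a rating loses half its weight every `half_life` seconds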
+ +
[docs] def compute_cooccurrence_matrix(self, df): + """Co-occurrence matrix. + + The co-occurrence matrix is defined as :math:`C = U^T * U` + + where U is the user_affinity matrix with 1's as values (instead of ratings). + + Args: + df (pandas.DataFrame): DataFrame of users and items + + Returns: + numpy.ndarray: Co-occurrence matrix + """ + user_item_hits = sparse.coo_matrix( + (np.repeat(1, df.shape[0]), (df[self.col_user_id], df[self.col_item_id])), + shape=(self.n_users, self.n_items), + ).tocsr() + + item_cooccurrence = user_item_hits.transpose().dot(user_item_hits) + item_cooccurrence = item_cooccurrence.multiply( + item_cooccurrence >= self.threshold + ) + + return item_cooccurrence.astype(df[self.col_rating].dtype)
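A toy trace of the co-occurrence computation, together with the Jaccard similarity SAR derives from it. The closed form used here, J(i, j) = c_ij / (c_ii + c_jj - c_ij), is the standard one; treat it as illustrative of the jaccard helper rather than its exact implementation:

import numpy as np
from scipy import sparse

# 3 users x 3 items binary interaction matrix U
hits = sparse.csr_matrix(np.array([[1, 1, 0], [1, 0, 1], [1, 1, 0]]))

cooccurrence = hits.T.dot(hits).toarray()  # C = U^T * U
# [[3, 2, 1],
#  [2, 2, 0],
#  [1, 0, 1]]  -- the diagonal holds the item frequencies

diag = cooccurrence.diagonal()
jaccard_sim = cooccurrence / (diag[:, None] + diag[None, :] - cooccurrence)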
+ +
[docs] def set_index(self, df): + """Generate continuous indices for users and items to reduce memory usage. + + Args: + df (pandas.DataFrame): dataframe with user and item ids + """ + + # generate a map of continuous index values to items + self.index2item = dict(enumerate(df[self.col_item].unique())) + self.index2user = dict(enumerate(df[self.col_user].unique())) + + # invert the mappings from above + self.item2index = {v: k for k, v in self.index2item.items()} + self.user2index = {v: k for k, v in self.index2user.items()} + + # set values for the total count of users and items + self.n_users = len(self.user2index) + self.n_items = len(self.index2item)
+ +
[docs] def fit(self, df): + """Main fit method for SAR. + + Note: + Please make sure that `df` has no duplicates. + + Args: + df (pandas.DataFrame): User item rating dataframe (without duplicates). + """ + select_columns = [self.col_user, self.col_item, self.col_rating] + if self.time_decay_flag: + select_columns += [self.col_timestamp] + + if df[select_columns].duplicated().any(): + raise ValueError("There should not be duplicates in the dataframe") + + # generate continuous indices if this hasn't been done + if self.index2item is None: + self.set_index(df) + + logger.info("Collecting user affinity matrix") + if not np.issubdtype(df[self.col_rating].dtype, np.number): + raise TypeError("Rating column data type must be numeric") + + # copy the DataFrame to avoid modification of the input + temp_df = df[select_columns].copy() + + if self.time_decay_flag: + logger.info("Calculating time-decayed affinities") + temp_df = self.compute_time_decay(df=temp_df, decay_column=self.col_rating) + + logger.info("Creating index columns") + # add mapping of user and item ids to indices + temp_df.loc[:, self.col_item_id] = temp_df[self.col_item].apply( + lambda item: self.item2index.get(item, np.NaN) + ) + temp_df.loc[:, self.col_user_id] = temp_df[self.col_user].apply( + lambda user: self.user2index.get(user, np.NaN) + ) + + if self.normalize: + self.rating_min = temp_df[self.col_rating].min() + self.rating_max = temp_df[self.col_rating].max() + logger.info("Calculating normalization factors") + temp_df[self.col_unity_rating] = 1.0 + if self.time_decay_flag: + temp_df = self.compute_time_decay( + df=temp_df, decay_column=self.col_unity_rating + ) + self.unity_user_affinity = self.compute_affinity_matrix( + df=temp_df, rating_col=self.col_unity_rating + ) + + # affinity matrix + logger.info("Building user affinity sparse matrix") + self.user_affinity = self.compute_affinity_matrix( + df=temp_df, rating_col=self.col_rating + ) + + # calculate item co-occurrence + logger.info("Calculating item co-occurrence") + item_cooccurrence = self.compute_cooccurrence_matrix(df=temp_df) + + # free up some space + del temp_df + + # creates an array with the frequency of every unique item + self.item_frequencies = item_cooccurrence.diagonal() + + logger.info("Calculating item similarity") + if self.similarity_type == SIM_COOCCUR: + logger.info("Using co-occurrence based similarity") + self.item_similarity = item_cooccurrence + elif self.similarity_type == SIM_COSINE: + logger.info("Using cosine similarity") + self.item_similarity = cosine_similarity(item_cooccurrence) + elif self.similarity_type == SIM_INCLUSION_INDEX: + logger.info("Using inclusion index") + self.item_similarity = inclusion_index(item_cooccurrence) + elif self.similarity_type == SIM_JACCARD: + logger.info("Using jaccard based similarity") + self.item_similarity = jaccard(item_cooccurrence) + elif self.similarity_type == SIM_LEXICOGRAPHERS_MUTUAL_INFORMATION: + logger.info("Using lexicographers mutual information similarity") + self.item_similarity = lexicographers_mutual_information(item_cooccurrence) + elif self.similarity_type == SIM_LIFT: + logger.info("Using lift based similarity") + self.item_similarity = lift(item_cooccurrence) + elif self.similarity_type == SIM_MUTUAL_INFORMATION: + logger.info("Using mutual information similarity") + self.item_similarity = mutual_information(item_cooccurrence) + else: + raise ValueError("Unknown similarity type: {}".format(self.similarity_type)) + + # free up some space + del item_cooccurrence + + 
logger.info("Done training")
+ +
[docs] def score(self, test, remove_seen=False): + """Score all items for test users. + + Args: + test (pandas.DataFrame): user to test + remove_seen (bool): flag to remove items seen in training from recommendation + + Returns: + numpy.ndarray: Value of interest of all items for the users. + """ + + # get user / item indices from test set + user_ids = list( + map( + lambda user: self.user2index.get(user, np.NaN), + test[self.col_user].unique(), + ) + ) + if any(np.isnan(user_ids)): + raise ValueError("SAR cannot score users that are not in the training set") + + # calculate raw scores with a matrix multiplication + logger.info("Calculating recommendation scores") + test_scores = self.user_affinity[user_ids, :].dot(self.item_similarity) + + # ensure we're working with a dense ndarray + if isinstance(test_scores, sparse.spmatrix): + test_scores = test_scores.toarray() + + if self.normalize: + counts = self.unity_user_affinity[user_ids, :].dot(self.item_similarity) + user_min_scores = ( + np.tile(counts.min(axis=1)[:, np.newaxis], test_scores.shape[1]) + * self.rating_min + ) + user_max_scores = ( + np.tile(counts.max(axis=1)[:, np.newaxis], test_scores.shape[1]) + * self.rating_max + ) + test_scores = rescale( + test_scores, + self.rating_min, + self.rating_max, + user_min_scores, + user_max_scores, + ) + + # remove items in the train set so recommended items are always novel + if remove_seen: + logger.info("Removing seen items") + test_scores += self.user_affinity[user_ids, :] * -np.inf + + return test_scores
+ +
[docs] def get_popularity_based_topk(self, top_k=10, sort_top_k=True, items=True): + """Get top K most frequently occurring items across all users. + + Args: + top_k (int): number of top items to recommend. + sort_top_k (bool): flag to sort top k results. + items (bool): if false, return most frequent users instead + + Returns: + pandas.DataFrame: top k most popular items. + """ + if items: + frequencies = self.item_frequencies + col = self.col_item + idx = self.index2item + else: + if self.user_frequencies is None: + self.user_frequencies = self.user_affinity.getnnz(axis=1).astype( + "int64" + ) + frequencies = self.user_frequencies + col = self.col_user + idx = self.index2user + + test_scores = np.array([frequencies]) + + logger.info("Getting top K") + top_components, top_scores = get_top_k_scored_items( + scores=test_scores, top_k=top_k, sort_top_k=sort_top_k + ) + + return pd.DataFrame( + { + col: [idx[item] for item in top_components.flatten()], + self.col_prediction: top_scores.flatten(), + } + )
+ +
[docs] def get_item_based_topk(self, items, top_k=10, sort_top_k=True): + """Get top K similar items to provided seed items based on similarity metric defined. + This method will take a set of items and use them to recommend the most similar items to that set + based on the similarity matrix fit during training. + This allows recommendations for cold-users (unseen during training), note - the model is not updated. + + The following options are possible based on information provided in the items input: + 1. Single user or seed of items: only item column (ratings are assumed to be 1) + 2. Single user or seed of items w/ ratings: item column and rating column + 3. Separate users or seeds of items: item and user column (user ids are only used to separate item sets) + 4. Separate users or seeds of items with ratings: item, user and rating columns provided + + Args: + items (pandas.DataFrame): DataFrame with item, user (optional), and rating (optional) columns + top_k (int): number of top items to recommend + sort_top_k (bool): flag to sort top k results + + Returns: + pandas.DataFrame: sorted top k recommendation items + """ + + # convert item ids to indices + item_ids = np.asarray( + list( + map( + lambda item: self.item2index.get(item, np.NaN), + items[self.col_item].values, + ) + ) + ) + + # if no ratings were provided assume they are all 1 + if self.col_rating in items.columns: + ratings = items[self.col_rating] + else: + ratings = pd.Series(np.ones_like(item_ids)) + + # create local map of user ids + if self.col_user in items.columns: + test_users = items[self.col_user] + user2index = {x[1]: x[0] for x in enumerate(items[self.col_user].unique())} + user_ids = test_users.map(user2index) + else: + # if no user column exists assume all entries are for a single user + test_users = pd.Series(np.zeros_like(item_ids)) + user_ids = test_users + n_users = user_ids.drop_duplicates().shape[0] + + # generate pseudo user affinity using seed items + pseudo_affinity = sparse.coo_matrix( + (ratings, (user_ids, item_ids)), shape=(n_users, self.n_items) + ).tocsr() + + # calculate raw scores with a matrix multiplication + test_scores = pseudo_affinity.dot(self.item_similarity) + + # remove items in the seed set so recommended items are novel + test_scores[user_ids, item_ids] = -np.inf + + top_items, top_scores = get_top_k_scored_items( + scores=test_scores, top_k=top_k, sort_top_k=sort_top_k + ) + + df = pd.DataFrame( + { + self.col_user: np.repeat( + test_users.drop_duplicates().values, top_items.shape[1] + ), + self.col_item: [self.index2item[item] for item in top_items.flatten()], + self.col_prediction: top_scores.flatten(), + } + ) + + # drop invalid items + return df.replace(-np.inf, np.nan).dropna()
+ +
[docs] def get_topk_most_similar_users(self, user, top_k, sort_top_k=True): + """Based on user affinity towards items, calculate the most similar users to the given user. + + Args: + user (int): user to retrieve most similar users for + top_k (int): number of top users to return + sort_top_k (bool): flag to sort top k results + + Returns: + pandas.DataFrame: top k most similar users and their scores + """ + user_idx = self.user2index[user] + similarities = self.user_affinity[user_idx].dot(self.user_affinity.T).toarray() + similarities[0, user_idx] = -np.inf + + top_items, top_scores = get_top_k_scored_items( + scores=similarities, top_k=top_k, sort_top_k=sort_top_k + ) + + df = pd.DataFrame( + { + self.col_user: [self.index2user[user] for user in top_items.flatten()], + self.col_prediction: top_scores.flatten(), + } + ) + + # drop invalid users (the query user itself, masked with -inf above) + return df.replace(-np.inf, np.nan).dropna()
+ +
[docs] def recommend_k_items(self, test, top_k=10, sort_top_k=True, remove_seen=False): + """Recommend top K items for all users which are in the test set + + Args: + test (pandas.DataFrame): users to test + top_k (int): number of top items to recommend + sort_top_k (bool): flag to sort top k results + remove_seen (bool): flag to remove items seen in training from recommendation + + Returns: + pandas.DataFrame: top k recommendation items for each user + """ + + test_scores = self.score(test, remove_seen=remove_seen) + + top_items, top_scores = get_top_k_scored_items( + scores=test_scores, top_k=top_k, sort_top_k=sort_top_k + ) + + df = pd.DataFrame( + { + self.col_user: np.repeat( + test[self.col_user].drop_duplicates().values, top_items.shape[1] + ), + self.col_item: [self.index2item[item] for item in top_items.flatten()], + self.col_prediction: top_scores.flatten(), + } + ) + + # drop invalid items + return df.replace(-np.inf, np.nan).dropna()
+ +
[docs] def predict(self, test): + """Output SAR scores for only the users-items pairs which are in the test set + + Args: + test (pandas.DataFrame): DataFrame that contains users and items to test + + Returns: + pandas.DataFrame: DataFrame contains the prediction results + """ + + test_scores = self.score(test) + user_ids = np.asarray( + list( + map( + lambda user: self.user2index.get(user, np.NaN), + test[self.col_user].values, + ) + ) + ) + + # create mapping of new items to zeros + item_ids = np.asarray( + list( + map( + lambda item: self.item2index.get(item, np.NaN), + test[self.col_item].values, + ) + ) + ) + nans = np.isnan(item_ids) + if any(nans): + logger.warning( + "Items found in test not seen during training, new items will have score of 0" + ) + test_scores = np.append(test_scores, np.zeros((self.n_users, 1)), axis=1) + item_ids[nans] = self.n_items + item_ids = item_ids.astype("int64") + + df = pd.DataFrame( + { + self.col_user: test[self.col_user].values, + self.col_item: test[self.col_item].values, + self.col_prediction: test_scores[user_ids, item_ids], + } + ) + return df
+
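Putting the class together, a minimal end-to-end sketch on toy data (default column names assumed; the number of returned rows depends on how many unseen items each user has):

import pandas as pd

# toy interaction data using the default column names
df = pd.DataFrame(
    {
        "userID": [1, 1, 2, 2, 3],
        "itemID": [10, 11, 10, 12, 11],
        "rating": [5.0, 4.0, 3.0, 2.0, 5.0],
        "timestamp": [1, 2, 3, 4, 5],
    }
)

model = SARSingleNode(similarity_type=SIM_JACCARD, timedecay_formula=False)
model.fit(df)

# top-2 novel recommendations per user in the test frame
topk = model.recommend_k_items(df[["userID"]].drop_duplicates(), top_k=2, remove_seen=True)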
+ +
\ No newline at end of file
diff --git a/_modules/recommenders/models/sasrec/model.html b/_modules/recommenders/models/sasrec/model.html
new file mode 100644
index 0000000000..6599350a3c
--- /dev/null
+++ b/_modules/recommenders/models/sasrec/model.html
@@ -0,0 +1,1231 @@
+recommenders.models.sasrec.model — Recommenders documentation
Source code for recommenders.models.sasrec.model

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import random
+import numpy as np
+from tqdm import tqdm
+import tensorflow as tf
+
+from recommenders.utils.timer import Timer
+
+
+
[docs]class MultiHeadAttention(tf.keras.layers.Layer): + """ + - Q (query), K (key) and V (value) are split into multiple heads (num_heads) + - each tuple (q, k, v) is fed to scaled_dot_product_attention + - all attention outputs are concatenated + """ + + def __init__(self, attention_dim, num_heads, dropout_rate): + """Initialize parameters. + + Args: + attention_dim (int): Dimension of the attention embeddings. + num_heads (int): Number of heads in the multi-head self-attention module. + dropout_rate (float): Dropout probability. + """ + super(MultiHeadAttention, self).__init__() + self.num_heads = num_heads + self.attention_dim = attention_dim + assert attention_dim % self.num_heads == 0 + self.dropout_rate = dropout_rate + + self.depth = attention_dim // self.num_heads + + self.Q = tf.keras.layers.Dense(self.attention_dim, activation=None) + self.K = tf.keras.layers.Dense(self.attention_dim, activation=None) + self.V = tf.keras.layers.Dense(self.attention_dim, activation=None) + self.dropout = tf.keras.layers.Dropout(self.dropout_rate)
[docs] def call(self, queries, keys): + """Model forward pass. + + Args: + queries (tf.Tensor): Tensor of queries. + keys (tf.Tensor): Tensor of keys + + Returns: + tf.Tensor: Output tensor. + """ + + # Linear projections + Q = self.Q(queries) # (N, T_q, C) + K = self.K(keys) # (N, T_k, C) + V = self.V(keys) # (N, T_k, C) + + # --- MULTI HEAD --- + # Split and concat, Q_, K_ and V_ are all (h*N, T_q, C/h) + Q_ = tf.concat(tf.split(Q, self.num_heads, axis=2), axis=0) + K_ = tf.concat(tf.split(K, self.num_heads, axis=2), axis=0) + V_ = tf.concat(tf.split(V, self.num_heads, axis=2), axis=0) + + # --- SCALED DOT PRODUCT --- + # Multiplication + outputs = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1])) # (h*N, T_q, T_k) + + # Scale + outputs = outputs / (K_.get_shape().as_list()[-1] ** 0.5) + + # Key Masking + key_masks = tf.sign(tf.abs(tf.reduce_sum(keys, axis=-1))) # (N, T_k) + key_masks = tf.tile(key_masks, [self.num_heads, 1]) # (h*N, T_k) + key_masks = tf.tile( + tf.expand_dims(key_masks, 1), [1, tf.shape(queries)[1], 1] + ) # (h*N, T_q, T_k) + + paddings = tf.ones_like(outputs) * (-(2**32) + 1) + # outputs, (h*N, T_q, T_k) + outputs = tf.where(tf.equal(key_masks, 0), paddings, outputs) + + # Future blinding (Causality) + diag_vals = tf.ones_like(outputs[0, :, :]) # (T_q, T_k) + tril = tf.linalg.LinearOperatorLowerTriangular( + diag_vals + ).to_dense() # (T_q, T_k) + masks = tf.tile( + tf.expand_dims(tril, 0), [tf.shape(outputs)[0], 1, 1] + ) # (h*N, T_q, T_k) + + paddings = tf.ones_like(masks) * (-(2**32) + 1) + # outputs, (h*N, T_q, T_k) + outputs = tf.where(tf.equal(masks, 0), paddings, outputs) + + # Activation + outputs = tf.nn.softmax(outputs) # (h*N, T_q, T_k) + + # Query Masking, query_masks (N, T_q) + query_masks = tf.sign(tf.abs(tf.reduce_sum(queries, axis=-1))) + query_masks = tf.tile(query_masks, [self.num_heads, 1]) # (h*N, T_q) + query_masks = tf.tile( + tf.expand_dims(query_masks, -1), [1, 1, tf.shape(keys)[1]] + ) # (h*N, T_q, T_k) + outputs *= query_masks # broadcasting. (N, T_q, C) + + # Dropouts + outputs = self.dropout(outputs) + + # Weighted sum + outputs = tf.matmul(outputs, V_) # ( h*N, T_q, C/h) + + # --- MULTI HEAD --- + # concat heads + outputs = tf.concat( + tf.split(outputs, self.num_heads, axis=0), axis=2 + ) # (N, T_q, C) + + # Residual connection + outputs += queries + + return outputs
+ + +
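The masking idiom used in call above (replace masked scores with a huge negative number so softmax drives their attention weight to zero) can be seen in isolation in this small sketch:

import tensorflow as tf

scores = tf.constant([[2.0, 1.0, 3.0]])
mask = tf.constant([[1.0, 1.0, 0.0]])  # the third key is padding

paddings = tf.ones_like(scores) * (-(2**32) + 1)
masked = tf.where(tf.equal(mask, 0), paddings, scores)

weights = tf.nn.softmax(masked)
# the third weight is ~0, so padded keys receive no attention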
[docs]class PointWiseFeedForward(tf.keras.layers.Layer): + """ + Convolution layers with residual connection + """ + + def __init__(self, conv_dims, dropout_rate): + """Initialize parameters. + + Args: + conv_dims (list): List of the dimensions of the Feedforward layer. + dropout_rate (float): Dropout probability. + """ + super(PointWiseFeedForward, self).__init__() + self.conv_dims = conv_dims + self.dropout_rate = dropout_rate + self.conv_layer1 = tf.keras.layers.Conv1D( + filters=self.conv_dims[0], kernel_size=1, activation="relu", use_bias=True + ) + self.conv_layer2 = tf.keras.layers.Conv1D( + filters=self.conv_dims[1], kernel_size=1, activation=None, use_bias=True + ) + self.dropout_layer = tf.keras.layers.Dropout(self.dropout_rate) + +
[docs] def call(self, x): + """Model forward pass. + + Args: + x (tf.Tensor): Input tensor. + + Returns: + tf.Tensor: Output tensor. + """ + + output = self.conv_layer1(x) + output = self.dropout_layer(output) + + output = self.conv_layer2(output) + output = self.dropout_layer(output) + + # Residual connection + output += x + + return output
+ + +
[docs]class EncoderLayer(tf.keras.layers.Layer): + """ + Transformer based encoder layer + + """ + + def __init__( + self, + seq_max_len, + embedding_dim, + attention_dim, + num_heads, + conv_dims, + dropout_rate, + ): + """Initialize parameters. + + Args: + seq_max_len (int): Maximum sequence length. + embedding_dim (int): Embedding dimension. + attention_dim (int): Dimension of the attention embeddings. + num_heads (int): Number of heads in the multi-head self-attention module. + conv_dims (list): List of the dimensions of the Feedforward layer. + dropout_rate (float): Dropout probability. + """ + super(EncoderLayer, self).__init__() + + self.seq_max_len = seq_max_len + self.embedding_dim = embedding_dim + + self.mha = MultiHeadAttention(attention_dim, num_heads, dropout_rate) + self.ffn = PointWiseFeedForward(conv_dims, dropout_rate) + + self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6) + self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6) + + self.dropout1 = tf.keras.layers.Dropout(dropout_rate) + self.dropout2 = tf.keras.layers.Dropout(dropout_rate) + + self.layer_normalization = LayerNormalization( + self.seq_max_len, self.embedding_dim, 1e-08 + ) + +
[docs] def call_(self, x, training, mask): + """Model forward pass. + + Args: + x (tf.Tensor): Input tensor. + training (tf.Tensor): Training tensor. + mask (tf.Tensor): Mask tensor. + + Returns: + tf.Tensor: Output tensor. + """ + + attn_output = self.mha(queries=self.layer_normalization(x), keys=x) + attn_output = self.dropout1(attn_output, training=training) + out1 = self.layernorm1(x + attn_output) + + # feed forward network + ffn_output = self.ffn(out1) # (batch_size, input_seq_len, d_model) + ffn_output = self.dropout2(ffn_output, training=training) + out2 = self.layernorm2( + out1 + ffn_output + ) # (batch_size, input_seq_len, d_model) + + # masking + out2 *= mask + + return out2
+ +
[docs] def call(self, x, training, mask): + """Model forward pass. + + Args: + x (tf.Tensor): Input tensor. + training (tf.Tensor): Training tensor. + mask (tf.Tensor): Mask tensor. + + Returns: + tf.Tensor: Output tensor. + """ + + x_norm = self.layer_normalization(x) + attn_output = self.mha(queries=x_norm, keys=x) + attn_output = self.ffn(attn_output) + out = attn_output * mask + + return out
+ + +
[docs]class Encoder(tf.keras.layers.Layer): + """ + Invokes Transformer based encoder with user defined number of layers + + """ + + def __init__( + self, + num_layers, + seq_max_len, + embedding_dim, + attention_dim, + num_heads, + conv_dims, + dropout_rate, + ): + """Initialize parameters. + + Args: + num_layers (int): Number of layers. + seq_max_len (int): Maximum sequence length. + embedding_dim (int): Embedding dimension. + attention_dim (int): Dimension of the attention embeddings. + num_heads (int): Number of heads in the multi-head self-attention module. + conv_dims (list): List of the dimensions of the Feedforward layer. + dropout_rate (float): Dropout probability. + """ + super(Encoder, self).__init__() + + self.num_layers = num_layers + + self.enc_layers = [ + EncoderLayer( + seq_max_len, + embedding_dim, + attention_dim, + num_heads, + conv_dims, + dropout_rate, + ) + for _ in range(num_layers) + ] + + self.dropout = tf.keras.layers.Dropout(dropout_rate) + +
[docs] def call(self, x, training, mask): + """Model forward pass. + + Args: + x (tf.Tensor): Input tensor. + training (tf.Tensor): Training tensor. + mask (tf.Tensor): Mask tensor. + + Returns: + tf.Tensor: Output tensor. + """ + + for i in range(self.num_layers): + x = self.enc_layers[i](x, training, mask) + + return x # (batch_size, input_seq_len, d_model)
+ + +
[docs]class LayerNormalization(tf.keras.layers.Layer): + """ + Layer normalization using mean and variance + gamma and beta are the learnable parameters + """ + + def __init__(self, seq_max_len, embedding_dim, epsilon): + """Initialize parameters. + + Args: + seq_max_len (int): Maximum sequence length. + embedding_dim (int): Embedding dimension. + epsilon (float): Epsilon value. + """ + super(LayerNormalization, self).__init__() + self.seq_max_len = seq_max_len + self.embedding_dim = embedding_dim + self.epsilon = epsilon + self.params_shape = (self.seq_max_len, self.embedding_dim) + g_init = tf.ones_initializer() + self.gamma = tf.Variable( + initial_value=g_init(shape=self.params_shape, dtype="float32"), + trainable=True, + ) + b_init = tf.zeros_initializer() + self.beta = tf.Variable( + initial_value=b_init(shape=self.params_shape, dtype="float32"), + trainable=True, + ) + +
[docs] def call(self, x): + """Model forward pass. + + Args: + x (tf.Tensor): Input tensor. + + Returns: + tf.Tensor: Output tensor. + """ + mean, variance = tf.nn.moments(x, [-1], keepdims=True) + normalized = (x - mean) / ((variance + self.epsilon) ** 0.5) + output = self.gamma * normalized + self.beta + return output
+ + +
[docs]class SASREC(tf.keras.Model): + """SAS Rec model + Self-Attentive Sequential Recommendation Using Transformer + + :Citation: + + Wang-Cheng Kang, Julian McAuley (2018), Self-Attentive Sequential + Recommendation. Proceedings of IEEE International Conference on + Data Mining (ICDM'18) + + Original source code from nnkkmto/SASRec-tf2, + https://github.com/nnkkmto/SASRec-tf2 + + """ + + def __init__(self, **kwargs): + """Model initialization. + + Args: + item_num (int): Number of items in the dataset. + seq_max_len (int): Maximum number of items in user history. + num_blocks (int): Number of Transformer blocks to be used. + embedding_dim (int): Item embedding dimension. + attention_dim (int): Transformer attention dimension. + conv_dims (list): List of the dimensions of the Feedforward layer. + dropout_rate (float): Dropout rate. + l2_reg (float): Coefficient of the L2 regularization. + num_neg_test (int): Number of negative examples used in testing. + """ + super(SASREC, self).__init__() + + self.item_num = kwargs.get("item_num", None) + self.seq_max_len = kwargs.get("seq_max_len", 100) + self.num_blocks = kwargs.get("num_blocks", 2) + self.embedding_dim = kwargs.get("embedding_dim", 100) + self.attention_dim = kwargs.get("attention_dim", 100) + self.attention_num_heads = kwargs.get("attention_num_heads", 1) + self.conv_dims = kwargs.get("conv_dims", [100, 100]) + self.dropout_rate = kwargs.get("dropout_rate", 0.5) + self.l2_reg = kwargs.get("l2_reg", 0.0) + self.num_neg_test = kwargs.get("num_neg_test", 100) + + self.item_embedding_layer = tf.keras.layers.Embedding( + self.item_num + 1, + self.embedding_dim, + name="item_embeddings", + mask_zero=True, + embeddings_regularizer=tf.keras.regularizers.L2(self.l2_reg), + ) + + self.positional_embedding_layer = tf.keras.layers.Embedding( + self.seq_max_len, + self.embedding_dim, + name="positional_embeddings", + mask_zero=False, + embeddings_regularizer=tf.keras.regularizers.L2(self.l2_reg), + ) + self.dropout_layer = tf.keras.layers.Dropout(self.dropout_rate) + self.encoder = Encoder( + self.num_blocks, + self.seq_max_len, + self.embedding_dim, + self.attention_dim, + self.attention_num_heads, + self.conv_dims, + self.dropout_rate, + ) + self.mask_layer = tf.keras.layers.Masking(mask_value=0) + self.layer_normalization = LayerNormalization( + self.seq_max_len, self.embedding_dim, 1e-08 + ) + +
[docs] def embedding(self, input_seq): + """Compute the sequence and positional embeddings. + + Args: + input_seq (tf.Tensor): Input sequence + + Returns: + tf.Tensor, tf.Tensor: + - Sequence embeddings. + - Positional embeddings. + """ + + seq_embeddings = self.item_embedding_layer(input_seq) + seq_embeddings = seq_embeddings * (self.embedding_dim**0.5) + + # FIXME + positional_seq = tf.expand_dims(tf.range(tf.shape(input_seq)[1]), 0) + positional_seq = tf.tile(positional_seq, [tf.shape(input_seq)[0], 1]) + positional_embeddings = self.positional_embedding_layer(positional_seq) + + return seq_embeddings, positional_embeddings
+ +
[docs] def call(self, x, training): + """Model forward pass. + + Args: + x (tf.Tensor): Input tensor. + training (tf.Tensor): Training tensor. + + Returns: + tf.Tensor, tf.Tensor, tf.Tensor: + - Logits of the positive examples. + - Logits of the negative examples. + - Mask for nonzero targets + """ + + input_seq = x["input_seq"] + pos = x["positive"] + neg = x["negative"] + + mask = tf.expand_dims(tf.cast(tf.not_equal(input_seq, 0), tf.float32), -1) + seq_embeddings, positional_embeddings = self.embedding(input_seq) + + # add positional embeddings + seq_embeddings += positional_embeddings + + # dropout + seq_embeddings = self.dropout_layer(seq_embeddings) + + # masking + seq_embeddings *= mask + + # --- ATTENTION BLOCKS --- + seq_attention = seq_embeddings + seq_attention = self.encoder(seq_attention, training, mask) + seq_attention = self.layer_normalization(seq_attention) # (b, s, d) + + # --- PREDICTION LAYER --- + # user's sequence embedding + pos = self.mask_layer(pos) + neg = self.mask_layer(neg) + + pos = tf.reshape(pos, [tf.shape(input_seq)[0] * self.seq_max_len]) + neg = tf.reshape(neg, [tf.shape(input_seq)[0] * self.seq_max_len]) + pos_emb = self.item_embedding_layer(pos) + neg_emb = self.item_embedding_layer(neg) + seq_emb = tf.reshape( + seq_attention, + [tf.shape(input_seq)[0] * self.seq_max_len, self.embedding_dim], + ) # (b*s, d) + + pos_logits = tf.reduce_sum(pos_emb * seq_emb, -1) + neg_logits = tf.reduce_sum(neg_emb * seq_emb, -1) + + pos_logits = tf.expand_dims(pos_logits, axis=-1) # (bs, 1) + # pos_prob = tf.keras.layers.Dense(1, activation='sigmoid')(pos_logits) # (bs, 1) + + neg_logits = tf.expand_dims(neg_logits, axis=-1) # (bs, 1) + # neg_prob = tf.keras.layers.Dense(1, activation='sigmoid')(neg_logits) # (bs, 1) + + # output = tf.concat([pos_logits, neg_logits], axis=0) + + # masking for loss calculation + istarget = tf.reshape( + tf.cast(tf.not_equal(pos, 0), dtype=tf.float32), + [tf.shape(input_seq)[0] * self.seq_max_len], + ) + + return pos_logits, neg_logits, istarget
+ +
[docs] def predict(self, inputs): + """Returns the logits for the test items. + + Args: + inputs (tf.Tensor): Input tensor. + + Returns: + tf.Tensor: Output tensor. + """ + training = False + input_seq = inputs["input_seq"] + candidate = inputs["candidate"] + + mask = tf.expand_dims(tf.cast(tf.not_equal(input_seq, 0), tf.float32), -1) + seq_embeddings, positional_embeddings = self.embedding(input_seq) + seq_embeddings += positional_embeddings + # seq_embeddings = self.dropout_layer(seq_embeddings) + seq_embeddings *= mask + seq_attention = seq_embeddings + seq_attention = self.encoder(seq_attention, training, mask) + seq_attention = self.layer_normalization(seq_attention) # (b, s, d) + seq_emb = tf.reshape( + seq_attention, + [tf.shape(input_seq)[0] * self.seq_max_len, self.embedding_dim], + ) # (b*s, d) + candidate_emb = self.item_embedding_layer(candidate) # (b, s, d) + candidate_emb = tf.transpose(candidate_emb, perm=[0, 2, 1]) # (b, d, s) + + test_logits = tf.matmul(seq_emb, candidate_emb) + # (200, 100) * (1, 101, 100)' + + test_logits = tf.reshape( + test_logits, + [tf.shape(input_seq)[0], self.seq_max_len, 1 + self.num_neg_test], + ) # (1, 200, 101) + test_logits = test_logits[:, -1, :] # (1, 101) + return test_logits
+ +
[docs] def loss_function(self, pos_logits, neg_logits, istarget): + """Losses are calculated separately for the positive and negative + items based on the corresponding logits. A mask is included to + take care of the zero items (added for padding). + + Args: + pos_logits (tf.Tensor): Logits of the positive examples. + neg_logits (tf.Tensor): Logits of the negative examples. + istarget (tf.Tensor): Mask for nonzero targets. + + Returns: + float: Loss. + """ + + pos_logits = pos_logits[:, 0] + neg_logits = neg_logits[:, 0] + + # ignore padding items (0) + # istarget = tf.reshape( + # tf.cast(tf.not_equal(self.pos, 0), dtype=tf.float32), + # [tf.shape(self.input_seq)[0] * self.seq_max_len], + # ) + # for logits + loss = tf.reduce_sum( + -tf.math.log(tf.math.sigmoid(pos_logits) + 1e-24) * istarget + - tf.math.log(1 - tf.math.sigmoid(neg_logits) + 1e-24) * istarget + ) / tf.reduce_sum(istarget) + + # for probabilities + # loss = tf.reduce_sum( + # - tf.math.log(pos_logits + 1e-24) * istarget - + # tf.math.log(1 - neg_logits + 1e-24) * istarget + # ) / tf.reduce_sum(istarget) + reg_loss = tf.compat.v1.losses.get_regularization_loss() + # reg_losses = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES) + # loss += sum(reg_losses) + loss += reg_loss + + return loss
+ +
[docs] def create_combined_dataset(self, u, seq, pos, neg): + """ + function to create model inputs from sampled batch data. + This function is used only during training. + """ + inputs = {} + seq = tf.keras.preprocessing.sequence.pad_sequences( + seq, padding="pre", truncating="pre", maxlen=self.seq_max_len + ) + pos = tf.keras.preprocessing.sequence.pad_sequences( + pos, padding="pre", truncating="pre", maxlen=self.seq_max_len + ) + neg = tf.keras.preprocessing.sequence.pad_sequences( + neg, padding="pre", truncating="pre", maxlen=self.seq_max_len + ) + + inputs["users"] = np.expand_dims(np.array(u), axis=-1) + inputs["input_seq"] = seq + inputs["positive"] = pos + inputs["negative"] = neg + + target = np.concatenate( + [ + np.repeat(1, seq.shape[0] * seq.shape[1]), + np.repeat(0, seq.shape[0] * seq.shape[1]), + ], + axis=0, + ) + target = np.expand_dims(target, axis=-1) + return inputs, target
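create_combined_dataset relies on pre-padding and pre-truncating, which keeps the most recent items in each sequence; a minimal sketch of that behavior:

import tensorflow as tf

seq = [[3, 7], [1, 2, 3, 4, 5, 6]]
padded = tf.keras.preprocessing.sequence.pad_sequences(
    seq, padding="pre", truncating="pre", maxlen=4
)
# -> [[0, 0, 3, 7],
#     [3, 4, 5, 6]]  -- short sequences are left-padded with 0,
#                       long ones keep only the most recent items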
+ +
[docs] def train(self, dataset, sampler, **kwargs): + """ + High level function for model training as well as + evaluation on the validation and test dataset + """ + num_epochs = kwargs.get("num_epochs", 10) + batch_size = kwargs.get("batch_size", 128) + lr = kwargs.get("learning_rate", 0.001) + val_epoch = kwargs.get("val_epoch", 5) + + num_steps = int(len(dataset.user_train) / batch_size) + + optimizer = tf.keras.optimizers.Adam( + learning_rate=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-7 + ) + + loss_function = self.loss_function + + train_loss = tf.keras.metrics.Mean(name="train_loss") + + train_step_signature = [ + { + "users": tf.TensorSpec(shape=(None, 1), dtype=tf.int64), + "input_seq": tf.TensorSpec( + shape=(None, self.seq_max_len), dtype=tf.int64 + ), + "positive": tf.TensorSpec( + shape=(None, self.seq_max_len), dtype=tf.int64 + ), + "negative": tf.TensorSpec( + shape=(None, self.seq_max_len), dtype=tf.int64 + ), + }, + tf.TensorSpec(shape=(None, 1), dtype=tf.int64), + ] + + @tf.function(input_signature=train_step_signature) + def train_step(inp, tar): + with tf.GradientTape() as tape: + pos_logits, neg_logits, loss_mask = self(inp, training=True) + loss = loss_function(pos_logits, neg_logits, loss_mask) + + gradients = tape.gradient(loss, self.trainable_variables) + optimizer.apply_gradients(zip(gradients, self.trainable_variables)) + + train_loss(loss) + return loss + + T = 0.0 + t0 = Timer() + t0.start() + + for epoch in range(1, num_epochs + 1): + + step_loss = [] + train_loss.reset_states() + for step in tqdm( + range(num_steps), total=num_steps, ncols=70, leave=False, unit="b" + ): + + u, seq, pos, neg = sampler.next_batch() + + inputs, target = self.create_combined_dataset(u, seq, pos, neg) + + loss = train_step(inputs, target) + step_loss.append(loss) + + if epoch % val_epoch == 0: + t0.stop() + t1 = t0.interval + T += t1 + print("Evaluating...") + t_test = self.evaluate(dataset) + t_valid = self.evaluate_valid(dataset) + print( + f"\nepoch: {epoch}, time: {T}, valid (NDCG@10: {t_valid[0]}, HR@10: {t_valid[1]})" + ) + print( + f"epoch: {epoch}, time: {T}, test (NDCG@10: {t_test[0]}, HR@10: {t_test[1]})" + ) + t0.start() + + t_test = self.evaluate(dataset) + print(f"\nepoch: {epoch}, test (NDCG@10: {t_test[0]}, HR@10: {t_test[1]})") + + return t_test
+ +
[docs] def evaluate(self, dataset): + """ + Evaluation on the test users (users with at least 3 items) + """ + usernum = dataset.usernum + itemnum = dataset.itemnum + train = dataset.user_train # removing deepcopy + valid = dataset.user_valid + test = dataset.user_test + + NDCG = 0.0 + HT = 0.0 + valid_user = 0.0 + + if usernum > 10000: + users = random.sample(range(1, usernum + 1), 10000) + else: + users = range(1, usernum + 1) + + for u in tqdm(users, ncols=70, leave=False, unit="b"): + + if len(train[u]) < 1 or len(test[u]) < 1: + continue + + seq = np.zeros([self.seq_max_len], dtype=np.int32) + idx = self.seq_max_len - 1 + seq[idx] = valid[u][0] + idx -= 1 + for i in reversed(train[u]): + seq[idx] = i + idx -= 1 + if idx == -1: + break + rated = set(train[u]) + rated.add(0) + item_idx = [test[u][0]] + for _ in range(self.num_neg_test): + t = np.random.randint(1, itemnum + 1) + while t in rated: + t = np.random.randint(1, itemnum + 1) + item_idx.append(t) + + inputs = {} + inputs["user"] = np.expand_dims(np.array([u]), axis=-1) + inputs["input_seq"] = np.array([seq]) + inputs["candidate"] = np.array([item_idx]) + + # inverse to get descending sort + predictions = -1.0 * self.predict(inputs) + predictions = np.array(predictions) + predictions = predictions[0] + + rank = predictions.argsort().argsort()[0] + + valid_user += 1 + + if rank < 10: + NDCG += 1 / np.log2(rank + 2) + HT += 1 + + return NDCG / valid_user, HT / valid_user
+ +
[docs] def evaluate_valid(self, dataset): + """ + Evaluation on the validation users + """ + usernum = dataset.usernum + itemnum = dataset.itemnum + train = dataset.user_train # removing deepcopy + valid = dataset.user_valid + + NDCG = 0.0 + valid_user = 0.0 + HT = 0.0 + if usernum > 10000: + users = random.sample(range(1, usernum + 1), 10000) + else: + users = range(1, usernum + 1) + + for u in tqdm(users, ncols=70, leave=False, unit="b"): + if len(train[u]) < 1 or len(valid[u]) < 1: + continue + + seq = np.zeros([self.seq_max_len], dtype=np.int32) + idx = self.seq_max_len - 1 + for i in reversed(train[u]): + seq[idx] = i + idx -= 1 + if idx == -1: + break + + rated = set(train[u]) + rated.add(0) + item_idx = [valid[u][0]] + for _ in range(self.num_neg_test): + t = np.random.randint(1, itemnum + 1) + while t in rated: + t = np.random.randint(1, itemnum + 1) + item_idx.append(t) + + inputs = {} + inputs["user"] = np.expand_dims(np.array([u]), axis=-1) + inputs["input_seq"] = np.array([seq]) + inputs["candidate"] = np.array([item_idx]) + + # predictions = -model.predict(sess, [u], [seq], item_idx) + predictions = -1.0 * self.predict(inputs) + predictions = np.array(predictions) + predictions = predictions[0] + + rank = predictions.argsort().argsort()[0] + + valid_user += 1 + + if rank < 10: + NDCG += 1 / np.log2(rank + 2) + HT += 1 + + return NDCG / valid_user, HT / valid_user
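The double-argsort rank trick and the NDCG@10 / HR@10 updates used by both evaluation loops, traced on toy scores (the true item sits at index 0, as in the code above):

import numpy as np

# higher score = better; negate so argsort ranks best-first
predictions = -1.0 * np.array([0.2, 0.9, 0.5, 0.1])

rank = predictions.argsort().argsort()[0]  # rank of the true item (index 0) -> 2

if rank < 10:
    ndcg = 1 / np.log2(rank + 2)  # 1 / log2(4) = 0.5
    hit = 1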
+
+ +
\ No newline at end of file
diff --git a/_modules/recommenders/models/sasrec/sampler.html b/_modules/recommenders/models/sasrec/sampler.html
new file mode 100644
index 0000000000..29c3a4fc66
--- /dev/null
+++ b/_modules/recommenders/models/sasrec/sampler.html
@@ -0,0 +1,495 @@
+recommenders.models.sasrec.sampler — Recommenders documentation
Source code for recommenders.models.sasrec.sampler

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+# Original codes are from
+# https://github.com/kang205/SASRec/blob/master/sampler.py
+
+import numpy as np
+from multiprocessing import Process, Queue
+
+
+def random_neq(left, right, s):
+    t = np.random.randint(left, right)
+    while t in s:
+        t = np.random.randint(left, right)
+    return t
+
+
+
[docs]def sample_function( + user_train, usernum, itemnum, batch_size, maxlen, result_queue, seed +): + """Batch sampler that creates a sequence of negative items based on the + original sequence of items (positive) that the user has interacted with. + + Args: + user_train (dict): dictionary of training examples for each user + usernum (int): number of users + itemnum (int): number of items + batch_size (int): batch size + maxlen (int): maximum input sequence length + result_queue (multiprocessing.Queue): queue for storing sample results + seed (int): seed for random generator + """ + + def sample(): + + user = np.random.randint(1, usernum + 1) + while len(user_train[user]) <= 1: + user = np.random.randint(1, usernum + 1) + + seq = np.zeros([maxlen], dtype=np.int32) + pos = np.zeros([maxlen], dtype=np.int32) + neg = np.zeros([maxlen], dtype=np.int32) + nxt = user_train[user][-1] + idx = maxlen - 1 + + ts = set(user_train[user]) + for i in reversed(user_train[user][:-1]): + seq[idx] = i + pos[idx] = nxt + if nxt != 0: + neg[idx] = random_neq(1, itemnum + 1, ts) + nxt = i + idx -= 1 + if idx == -1: + break + + return (user, seq, pos, neg) + + np.random.seed(seed) + while True: + one_batch = [] + for _ in range(batch_size): + one_batch.append(sample()) + + result_queue.put(zip(*one_batch))
+ + +
[docs]class WarpSampler(object): + """Sampler object that creates an iterator for feeding batch data while training. + + Attributes: + User: dict, all the users (keys) with items as values + usernum: integer, total number of users + itemnum: integer, total number of items + batch_size (int): batch size + maxlen (int): maximum input sequence length + n_workers (int): number of workers for parallel execution + """ + + def __init__(self, User, usernum, itemnum, batch_size=64, maxlen=10, n_workers=1): + self.result_queue = Queue(maxsize=n_workers * 10) + self.processors = [] + for i in range(n_workers): + self.processors.append( + Process( + target=sample_function, + args=( + User, + usernum, + itemnum, + batch_size, + maxlen, + self.result_queue, + np.random.randint(2e9), + ), + ) + ) + self.processors[-1].daemon = True + self.processors[-1].start() + + def next_batch(self): + return self.result_queue.get() + + def close(self): + for p in self.processors: + p.terminate() + p.join()
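A minimal usage sketch for the sampler (toy data; user_train maps 1-based user ids to chronologically ordered item ids, matching how SASREC.train consumes the batches):

# toy training histories
user_train = {1: [3, 7, 9], 2: [2, 5]}

sampler = WarpSampler(user_train, usernum=2, itemnum=10, batch_size=2, maxlen=5, n_workers=1)
u, seq, pos, neg = sampler.next_batch()  # four tuples of length batch_size, as produced by zip(*batch)
sampler.close()  # terminate the worker processes when done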
+
+ +
\ No newline at end of file
diff --git a/_modules/recommenders/models/sasrec/ssept.html b/_modules/recommenders/models/sasrec/ssept.html
new file mode 100644
index 0000000000..9b2a18fb24
--- /dev/null
+++ b/_modules/recommenders/models/sasrec/ssept.html
@@ -0,0 +1,650 @@
+recommenders.models.sasrec.ssept — Recommenders documentation
Source code for recommenders.models.sasrec.ssept

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import tensorflow as tf
+from recommenders.models.sasrec.model import SASREC, Encoder, LayerNormalization
+
+
+
[docs]class SSEPT(SASREC): + """ + SSE-PT Model + + :Citation: + + Wu L., Li S., Hsieh C-J., Sharpnack J., SSE-PT: Sequential Recommendation + Via Personalized Transformer, RecSys, 2020. + TF 1.x codebase: https://github.com/SSE-PT/SSE-PT + TF 2.x codebase (SASRec): https://github.com/nnkkmto/SASRec-tf2 + """ + + def __init__(self, **kwargs): + """Model initialization. + + Args: + item_num (int): Number of items in the dataset. + seq_max_len (int): Maximum number of items in user history. + num_blocks (int): Number of Transformer blocks to be used. + embedding_dim (int): Item embedding dimension. + attention_dim (int): Transformer attention dimension. + conv_dims (list): List of the dimensions of the Feedforward layer. + dropout_rate (float): Dropout rate. + l2_reg (float): Coefficient of the L2 regularization. + num_neg_test (int): Number of negative examples used in testing. + user_num (int): Number of users in the dataset. + user_embedding_dim (int): User embedding dimension. + item_embedding_dim (int): Item embedding dimension. + """ + super().__init__(**kwargs) + + self.user_num = kwargs.get("user_num", None) # new compared to SASREC + self.conv_dims = kwargs.get("conv_dims", [200, 200]) # modified default + self.user_embedding_dim = kwargs.get( + "user_embedding_dim", self.embedding_dim + ) # extra parameter + self.item_embedding_dim = kwargs.get("item_embedding_dim", self.embedding_dim) + self.hidden_units = self.item_embedding_dim + self.user_embedding_dim + + # new compared to SASREC: user embedding + self.user_embedding_layer = tf.keras.layers.Embedding( + input_dim=self.user_num + 1, + output_dim=self.user_embedding_dim, + name="user_embeddings", + mask_zero=True, + input_length=1, + embeddings_regularizer=tf.keras.regularizers.L2(self.l2_reg), + ) + self.positional_embedding_layer = tf.keras.layers.Embedding( + self.seq_max_len, + self.user_embedding_dim + self.item_embedding_dim, # user and item embeddings are concatenated + name="positional_embeddings", + mask_zero=False, + embeddings_regularizer=tf.keras.regularizers.L2(self.l2_reg), + ) + self.dropout_layer = tf.keras.layers.Dropout(self.dropout_rate) + self.encoder = Encoder( + self.num_blocks, + self.seq_max_len, + self.hidden_units, + self.hidden_units, + self.attention_num_heads, + self.conv_dims, + self.dropout_rate, + ) + self.mask_layer = tf.keras.layers.Masking(mask_value=0) + self.layer_normalization = LayerNormalization( + self.seq_max_len, self.hidden_units, 1e-08 + )
[docs] def call(self, x, training): + """Model forward pass. + + Args: + x (tf.Tensor): Input tensor. + training (tf.Tensor): Training tensor. + + Returns: + tf.Tensor, tf.Tensor, tf.Tensor: + - Logits of the positive examples. + - Logits of the negative examples. + - Mask for nonzero targets + """ + + users = x["users"] + input_seq = x["input_seq"] + pos = x["positive"] + neg = x["negative"] + + mask = tf.expand_dims(tf.cast(tf.not_equal(input_seq, 0), tf.float32), -1) + seq_embeddings, positional_embeddings = self.embedding(input_seq) + + # User Encoding + # u0_latent = self.user_embedding_layer(users[0]) + # u0_latent = u0_latent * (self.embedding_dim ** 0.5) + u_latent = self.user_embedding_layer(users) + u_latent = u_latent * (self.user_embedding_dim**0.5) # (b, 1, h) + # return users + + # replicate the user embedding for all the items + u_latent = tf.tile(u_latent, [1, tf.shape(input_seq)[1], 1]) # (b, s, h) + + seq_embeddings = tf.reshape( + tf.concat([seq_embeddings, u_latent], 2), + [tf.shape(input_seq)[0], -1, self.hidden_units], + ) + seq_embeddings += positional_embeddings + + # dropout + seq_embeddings = self.dropout_layer(seq_embeddings, training=training) + + # masking + seq_embeddings *= mask + + # --- ATTENTION BLOCKS --- + seq_attention = seq_embeddings # (b, s, h1 + h2) + + seq_attention = self.encoder(seq_attention, training, mask) + seq_attention = self.layer_normalization(seq_attention) # (b, s, h1+h2) + + # --- PREDICTION LAYER --- + # user's sequence embedding + pos = self.mask_layer(pos) + neg = self.mask_layer(neg) + + user_emb = tf.reshape( + u_latent, + [tf.shape(input_seq)[0] * self.seq_max_len, self.user_embedding_dim], + ) + pos = tf.reshape(pos, [tf.shape(input_seq)[0] * self.seq_max_len]) + neg = tf.reshape(neg, [tf.shape(input_seq)[0] * self.seq_max_len]) + pos_emb = self.item_embedding_layer(pos) + neg_emb = self.item_embedding_layer(neg) + + # Add user embeddings + pos_emb = tf.reshape(tf.concat([pos_emb, user_emb], 1), [-1, self.hidden_units]) + neg_emb = tf.reshape(tf.concat([neg_emb, user_emb], 1), [-1, self.hidden_units]) + + seq_emb = tf.reshape( + seq_attention, + [tf.shape(input_seq)[0] * self.seq_max_len, self.hidden_units], + ) # (b*s, d) + + pos_logits = tf.reduce_sum(pos_emb * seq_emb, -1) + neg_logits = tf.reduce_sum(neg_emb * seq_emb, -1) + + pos_logits = tf.expand_dims(pos_logits, axis=-1) # (bs, 1) + # pos_prob = tf.keras.layers.Dense(1, activation='sigmoid')(pos_logits) # (bs, 1) + + neg_logits = tf.expand_dims(neg_logits, axis=-1) # (bs, 1) + # neg_prob = tf.keras.layers.Dense(1, activation='sigmoid')(neg_logits) # (bs, 1) + + # output = tf.concat([pos_logits, neg_logits], axis=0) + + # masking for loss calculation + istarget = tf.reshape( + tf.cast(tf.not_equal(pos, 0), dtype=tf.float32), + [tf.shape(input_seq)[0] * self.seq_max_len], + ) + + return pos_logits, neg_logits, istarget
+ +
[docs] def predict(self, inputs): + """Model prediction for candidate (negative) items. + + Args: + inputs (dict): Dictionary with the tensors "user", "input_seq" and "candidate". + + Returns: + tf.Tensor: Logits of the 1 + num_neg_test candidate items. + """ + training = False + user = inputs["user"] + input_seq = inputs["input_seq"] + candidate = inputs["candidate"] + + mask = tf.expand_dims(tf.cast(tf.not_equal(input_seq, 0), tf.float32), -1) + seq_embeddings, positional_embeddings = self.embedding(input_seq) # (1, s, h) + + u0_latent = self.user_embedding_layer(user) + u0_latent = u0_latent * (self.user_embedding_dim**0.5) # (1, 1, h) + u0_latent = tf.squeeze(u0_latent, axis=0) # (1, h) + test_user_emb = tf.tile(u0_latent, [1 + self.num_neg_test, 1]) # (101, h) + + u_latent = self.user_embedding_layer(user) + u_latent = u_latent * (self.user_embedding_dim**0.5) # (b, 1, h) + u_latent = tf.tile(u_latent, [1, tf.shape(input_seq)[1], 1]) # (b, s, h) + + seq_embeddings = tf.reshape( + tf.concat([seq_embeddings, u_latent], 2), + [tf.shape(input_seq)[0], -1, self.hidden_units], + ) + seq_embeddings += positional_embeddings # (b, s, h1 + h2) + + seq_embeddings *= mask + seq_attention = seq_embeddings + seq_attention = self.encoder(seq_attention, training, mask) + seq_attention = self.layer_normalization(seq_attention) # (b, s, h1+h2) + seq_emb = tf.reshape( + seq_attention, + [tf.shape(input_seq)[0] * self.seq_max_len, self.hidden_units], + ) # (b*s1, h1+h2) + + candidate_emb = self.item_embedding_layer(candidate) # (b, s2, h2) + candidate_emb = tf.squeeze(candidate_emb, axis=0) # (s2, h2) + candidate_emb = tf.reshape( + tf.concat([candidate_emb, test_user_emb], 1), [-1, self.hidden_units] + ) # (b*s2, h1+h2) + + candidate_emb = tf.transpose(candidate_emb, perm=[1, 0]) # (h1+h2, b*s2) + test_logits = tf.matmul(seq_emb, candidate_emb) # (b*s1, b*s2) + + test_logits = tf.reshape( + test_logits, + [tf.shape(input_seq)[0], self.seq_max_len, 1 + self.num_neg_test], + ) # (1, s, 101) + test_logits = test_logits[:, -1, :] # (1, 101) + return test_logits
+ +
[docs] def loss_function(self, pos_logits, neg_logits, istarget): + """Losses are calculated separately for the positive and negative + items based on the corresponding logits. A mask is included to + take care of the zero items (added for padding). + + Args: + pos_logits (tf.Tensor): Logits of the positive examples. + neg_logits (tf.Tensor): Logits of the negative examples. + istarget (tf.Tensor): Mask for nonzero targets. + + Returns: + float: Loss. + """ + + pos_logits = pos_logits[:, 0] + neg_logits = neg_logits[:, 0] + + # ignore padding items (0) + # istarget = tf.reshape( + # tf.cast(tf.not_equal(self.pos, 0), dtype=tf.float32), + # [tf.shape(self.input_seq)[0] * self.seq_max_len], + # ) + # for logits + loss = tf.reduce_sum( + -tf.math.log(tf.math.sigmoid(pos_logits) + 1e-24) * istarget + - tf.math.log(1 - tf.math.sigmoid(neg_logits) + 1e-24) * istarget + ) / tf.reduce_sum(istarget) + + # for probabilities + # loss = tf.reduce_sum( + # - tf.math.log(pos_logits + 1e-24) * istarget - + # tf.math.log(1 - neg_logits + 1e-24) * istarget + # ) / tf.reduce_sum(istarget) + reg_loss = tf.compat.v1.losses.get_regularization_loss() + # reg_losses = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES) + # loss += sum(reg_losses) + loss += reg_loss + + return loss
+
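A minimal construction sketch for the class above (a sketch only: the dimension values are illustrative, and `data` is assumed to be a SASRecDataSet providing `itemnum`/`usernum`; the remaining kwargs are consumed by this class and the SASREC base constructor):

    model = SSEPT(
        item_num=data.itemnum,    # data: hypothetical SASRecDataSet
        user_num=data.usernum,
        seq_max_len=50,
        num_blocks=2,
        embedding_dim=100,        # item embedding size
        user_embedding_dim=10,
        item_embedding_dim=100,
        attention_dim=100,
        attention_num_heads=1,
        conv_dims=[110, 110],     # item_embedding_dim + user_embedding_dim
        dropout_rate=0.5,
        l2_reg=0.0,
        num_neg_test=100,
    )
    # hidden_units becomes item_embedding_dim + user_embedding_dim = 110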
\ No newline at end of file diff --git a/_modules/recommenders/models/sasrec/util.html b/_modules/recommenders/models/sasrec/util.html new file mode 100644 index 0000000000..09f97522f8 --- /dev/null +++ b/_modules/recommenders/models/sasrec/util.html @@ -0,0 +1,496 @@ +recommenders.models.sasrec.util — Recommenders documentation
Source code for recommenders.models.sasrec.util

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+from collections import defaultdict
+
+
+
[docs]class SASRecDataSet: + """ + A class for creating a SASRec-specific dataset used during + training, validation and testing. + + Attributes: + usernum: integer, total number of users + itemnum: integer, total number of items + User: dict, all the users (keys) with items as values + Items: set of all the items + user_train: dict, subset of User that is used for training + user_valid: dict, subset of User that is used for validation + user_test: dict, subset of User that is used for testing + col_sep: column separator in the data file + filename: data filename + """ + + def __init__(self, **kwargs): + self.usernum = 0 + self.itemnum = 0 + self.User = defaultdict(list) + self.Items = set() + self.user_train = {} + self.user_valid = {} + self.user_test = {} + self.col_sep = kwargs.get("col_sep", " ") + self.filename = kwargs.get("filename", None) + + if self.filename: + with open(self.filename, "r") as fr: + sample = fr.readline() + # count the columns in the first line to detect a timestamp column + ncols = len(sample.strip().split(self.col_sep)) + if ncols == 3: + self.with_time = True + else: + self.with_time = False + + def split(self, **kwargs): + self.filename = kwargs.get("filename", self.filename) + if not self.filename: + raise ValueError("Filename is required") + + if self.with_time: + self.data_partition_with_time() + else: + self.data_partition() + + def data_partition(self): + # assume user/item index starting from 1 + with open(self.filename, "r") as f: + for line in f: + u, i = line.rstrip().split(self.col_sep) + u = int(u) + i = int(i) + self.usernum = max(u, self.usernum) + self.itemnum = max(i, self.itemnum) + self.User[u].append(i) + + for user in self.User: + nfeedback = len(self.User[user]) + if nfeedback < 3: + self.user_train[user] = self.User[user] + self.user_valid[user] = [] + self.user_test[user] = [] + else: + self.user_train[user] = self.User[user][:-2] + self.user_valid[user] = [] + self.user_valid[user].append(self.User[user][-2]) + self.user_test[user] = [] + self.user_test[user].append(self.User[user][-1]) + + def data_partition_with_time(self): + # assume user/item index starting from 1 + with open(self.filename, "r") as f: + for line in f: + u, i, t = line.rstrip().split(self.col_sep) + u = int(u) + i = int(i) + t = float(t) + self.usernum = max(u, self.usernum) + self.itemnum = max(i, self.itemnum) + self.User[u].append((i, t)) + self.Items.add(i) + + for user in self.User.keys(): + # sort by time + items = sorted(self.User[user], key=lambda x: x[1]) + # keep only the items + items = [x[0] for x in items] + self.User[user] = items + nfeedback = len(self.User[user]) + if nfeedback < 3: + self.user_train[user] = self.User[user] + self.user_valid[user] = [] + self.user_test[user] = [] + else: + self.user_train[user] = self.User[user][:-2] + self.user_valid[user] = [] + self.user_valid[user].append(self.User[user][-2]) + self.user_test[user] = [] + self.user_test[user].append(self.User[user][-1])
+
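A minimal usage sketch of the class above ("ratings.txt" is a hypothetical space-separated file with one "user item" pair, or "user item time" triple, per line, with ids starting from 1):

    data = SASRecDataSet(filename="ratings.txt", col_sep=" ")
    data.split()  # fills user_train / user_valid / user_test
    print(data.usernum, data.itemnum, len(data.user_train))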
\ No newline at end of file diff --git a/_modules/recommenders/models/surprise/surprise_utils.html b/_modules/recommenders/models/surprise/surprise_utils.html new file mode 100644 index 0000000000..3755d90aaf --- /dev/null +++ b/_modules/recommenders/models/surprise/surprise_utils.html @@ -0,0 +1,509 @@ +recommenders.models.surprise.surprise_utils — Recommenders documentation
Source code for recommenders.models.surprise.surprise_utils

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import pandas as pd
+import numpy as np
+
+from recommenders.utils.constants import (
+    DEFAULT_USER_COL,
+    DEFAULT_ITEM_COL,
+    DEFAULT_PREDICTION_COL,
+)
+from recommenders.utils.general_utils import invert_dictionary
+
+
+
[docs]def surprise_trainset_to_df( + trainset, col_user="uid", col_item="iid", col_rating="rating" +): + """Converts a `surprise.Trainset` object to `pandas.DataFrame` + + More info: https://surprise.readthedocs.io/en/stable/trainset.html + + Args: + trainset (object): A surprise.Trainset object. + col_user (str): User column name. + col_item (str): Item column name. + col_rating (str): Rating column name. + + Returns: + pandas.DataFrame: A dataframe with user column (str), item column (str), and rating column (float). + """ + df = pd.DataFrame(trainset.all_ratings(), columns=[col_user, col_item, col_rating]) + map_user = ( + trainset._inner2raw_id_users + if trainset._inner2raw_id_users is not None + else invert_dictionary(trainset._raw2inner_id_users) + ) + map_item = ( + trainset._inner2raw_id_items + if trainset._inner2raw_id_items is not None + else invert_dictionary(trainset._raw2inner_id_items) + ) + df[col_user] = df[col_user].map(map_user) + df[col_item] = df[col_item].map(map_item) + return df
+ + +
[docs]def predict( + algo, + data, + usercol=DEFAULT_USER_COL, + itemcol=DEFAULT_ITEM_COL, + predcol=DEFAULT_PREDICTION_COL, +): + """Computes predictions of an algorithm from Surprise on the data. Can be used for computing rating metrics like RMSE. + + Args: + algo (surprise.prediction_algorithms.algo_base.AlgoBase): an algorithm from Surprise + data (pandas.DataFrame): the data on which to predict + usercol (str): name of the user column + itemcol (str): name of the item column + predcol (str): name of the prediction column + + Returns: + pandas.DataFrame: Dataframe with usercol, itemcol, predcol + """ + predictions = [ + algo.predict(getattr(row, usercol), getattr(row, itemcol)) + for row in data.itertuples() + ] + predictions = pd.DataFrame(predictions) + predictions = predictions.rename( + index=str, columns={"uid": usercol, "iid": itemcol, "est": predcol} + ) + return predictions.drop(["details", "r_ui"], axis="columns")
+ + +
[docs]def compute_ranking_predictions( + algo, + data, + usercol=DEFAULT_USER_COL, + itemcol=DEFAULT_ITEM_COL, + predcol=DEFAULT_PREDICTION_COL, + remove_seen=False, +): + """Computes predictions of an algorithm from Surprise on all users and items in data. It can be used for computing + ranking metrics like NDCG. + + Args: + algo (surprise.prediction_algorithms.algo_base.AlgoBase): an algorithm from Surprise + data (pandas.DataFrame): the data from which to get the users and items + usercol (str): name of the user column + itemcol (str): name of the item column + predcol (str): name of the prediction column + remove_seen (bool): flag to remove (user, item) pairs seen in the training data + + Returns: + pandas.DataFrame: Dataframe with usercol, itemcol, predcol + """ + preds_lst = [] + users = data[usercol].unique() + items = data[itemcol].unique() + + for user in users: + for item in items: + preds_lst.append([user, item, algo.predict(user, item).est]) + + all_predictions = pd.DataFrame(data=preds_lst, columns=[usercol, itemcol, predcol]) + + if remove_seen: + tempdf = pd.concat( + [ + data[[usercol, itemcol]], + pd.DataFrame( + data=np.ones(data.shape[0]), columns=["dummycol"], index=data.index + ), + ], + axis=1, + ) + merged = pd.merge(tempdf, all_predictions, on=[usercol, itemcol], how="outer") + return merged[merged["dummycol"].isnull()].drop("dummycol", axis=1) + else: + return all_predictions
+
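A minimal end-to-end sketch of the two helpers above with a Surprise SVD model (the dataframe `df` and its userID/itemID/rating column names are hypothetical):

    import surprise

    reader = surprise.Reader(rating_scale=(1, 5))
    train_set = surprise.Dataset.load_from_df(
        df[["userID", "itemID", "rating"]], reader
    ).build_full_trainset()
    svd = surprise.SVD(random_state=0, n_factors=200, n_epochs=30)
    svd.fit(train_set)

    # Rating predictions on the observed pairs (for RMSE-style metrics) ...
    preds = predict(svd, df, usercol="userID", itemcol="itemID")
    # ... and predictions over all user-item pairs (for ranking metrics).
    all_preds = compute_ranking_predictions(
        svd, df, usercol="userID", itemcol="itemID", remove_seen=True
    )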
\ No newline at end of file diff --git a/_modules/recommenders/models/tfidf/tfidf_utils.html b/_modules/recommenders/models/tfidf/tfidf_utils.html new file mode 100644 index 0000000000..eed28a100c --- /dev/null +++ b/_modules/recommenders/models/tfidf/tfidf_utils.html @@ -0,0 +1,786 @@ +recommenders.models.tfidf.tfidf_utils — Recommenders documentation
Source code for recommenders.models.tfidf.tfidf_utils

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+
+from sklearn.feature_extraction.text import TfidfVectorizer
+from sklearn.metrics.pairwise import linear_kernel
+from transformers import BertTokenizer
+import re
+import unicodedata
+import pandas as pd
+import numpy as np
+
+import nltk
+from nltk.stem.porter import PorterStemmer
+
+
+
[docs]class TfidfRecommender: + """Term Frequency - Inverse Document Frequency (TF-IDF) Recommender + + This class provides content-based recommendations using TF-IDF vectorization in combination with cosine similarity. + """ + + def __init__(self, id_col, tokenization_method="scibert"): + """Initialize model parameters + + Args: + id_col (str): Name of column containing item IDs. + tokenization_method (str): ['none','nltk','bert','scibert'] option for tokenization method. + """ + self.id_col = id_col + if tokenization_method.lower() not in ["none", "nltk", "bert", "scibert"]: + raise ValueError( + 'Tokenization method must be one of ["none" | "nltk" | "bert" | "scibert"]' + ) + self.tokenization_method = tokenization_method.lower() + + # Initialize other variables used in this class + self.tf = TfidfVectorizer() + self.tfidf_matrix = dict() + self.tokens = dict() + self.stop_words = frozenset() + self.recommendations = dict() + self.top_k_recommendations = pd.DataFrame() + + def __clean_text(self, text, for_BERT=False, verbose=False): + """Clean text by removing HTML tags, symbols, and punctuation. + + Args: + text (str): Text to clean. + for_BERT (boolean): Whether the text is being cleaned for a BERT word tokenization method. + verbose (boolean): Whether to print a message when the text cannot be cleaned. + + Returns: + str: Cleaned version of text. + """ + + try: + # Normalize unicode + text_norm = unicodedata.normalize("NFC", text) + + # Remove HTML tags + clean = re.sub("<.*?>", "", text_norm) + + # Remove new line and tabs + clean = clean.replace("\n", " ") + clean = clean.replace("\t", " ") + clean = clean.replace("\r", " ") + clean = clean.replace("\xa0", "") # non-breaking space + + # Remove all punctuation and special characters + clean = re.sub( + r"([^\s\w]|_)+", "", clean + ) # noqa W695 invalid escape sequence '\s' + + # If you want to keep some punctuation, see below commented out example + # clean = re.sub(r'([^\s\w\-\_\(\)]|_)+','', clean) + + # Skip further processing if the text will be used in BERT tokenization + if for_BERT is False: + # Lower case + clean = clean.lower() + except Exception: + if verbose is True: + print("Cannot clean non-existent text") + clean = "" + + return clean + 
[docs] def clean_dataframe(self, df, cols_to_clean, new_col_name="cleaned_text"): + """Clean the text within the columns of interest and return a dataframe with cleaned and combined text. + + Args: + df (pandas.DataFrame): Dataframe containing the text content to clean. + cols_to_clean (list of str): List of columns to clean by name (e.g., ['abstract','full_text']). + new_col_name (str): Name of the new column that will contain the cleaned text. + + Returns: + pandas.DataFrame: Dataframe with cleaned text in the new column. + """ + # Collapse the table such that all descriptive text is just in a single column + df = df.replace(np.nan, "", regex=True) + df[new_col_name] = df[cols_to_clean].apply(lambda cols: " ".join(cols), axis=1) + + # Check if for BERT tokenization + if self.tokenization_method in ["bert", "scibert"]: + for_BERT = True + else: + for_BERT = False + + # Clean the text in the dataframe + df[new_col_name] = df[new_col_name].map( + lambda x: self.__clean_text(x, for_BERT) + ) + + return df
+ +
[docs] def tokenize_text( + self, df_clean, text_col="cleaned_text", ngram_range=(1, 3), min_df=0 + ): + """Tokenize the input text. + For more details on the TfidfVectorizer, see https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html + + Args: + df_clean (pandas.DataFrame): Dataframe with cleaned text in the new column. + text_col (str): Name of column containing the cleaned text. + ngram_range (tuple of int): The lower and upper boundary of the range of n-values for different n-grams to be extracted. + min_df (int): When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold. + + Returns: + TfidfVectorizer, pandas.Series: + - Scikit-learn TfidfVectorizer object defined in `.tokenize_text()`. + - Each row contains tokens for respective documents separated by spaces. + """ + vectors = df_clean[text_col] + + # If a HuggingFace BERT word tokenization method + if self.tokenization_method in ["bert", "scibert"]: + # Set vectorizer + tf = TfidfVectorizer( + analyzer="word", + ngram_range=ngram_range, + min_df=min_df, + stop_words="english", + ) + + # Get appropriate transformer name + if self.tokenization_method == "bert": + bert_method = "bert-base-cased" + elif self.tokenization_method == "scibert": + bert_method = "allenai/scibert_scivocab_cased" + + # Load pre-trained model tokenizer (vocabulary) + tokenizer = BertTokenizer.from_pretrained(bert_method) + + # Loop through each item + vectors_tokenized = vectors.copy() + for i in range(0, len(vectors)): + vectors_tokenized[i] = " ".join(tokenizer.tokenize(vectors[i])) + + elif self.tokenization_method == "nltk": + # NLTK Stemming + token_dict = {} # noqa: F841 + stemmer = PorterStemmer() + + def stem_tokens(tokens, stemmer): + stemmed = [] + for item in tokens: + stemmed.append(stemmer.stem(item)) + return stemmed + + def tokenize(text): + tokens = nltk.word_tokenize(text) + stems = stem_tokens(tokens, stemmer) + return stems + + # When defining a custom tokenizer with TfidfVectorizer, the tokenization is applied in the fit function + tf = TfidfVectorizer( + tokenizer=tokenize, + analyzer="word", + ngram_range=ngram_range, + min_df=min_df, + stop_words="english", + ) + vectors_tokenized = vectors + + elif self.tokenization_method == "none": + # No tokenization applied + tf = TfidfVectorizer( + analyzer="word", + ngram_range=ngram_range, + min_df=min_df, + stop_words="english", + ) + vectors_tokenized = vectors + + # Save to class variable + self.tf = tf + + return tf, vectors_tokenized
+ +
[docs] def fit(self, tf, vectors_tokenized): + """Fit TF-IDF vectorizer to the cleaned and tokenized text. + + Args: + tf (TfidfVectorizer): sklearn.feature_extraction.text.TfidfVectorizer object defined in .tokenize_text(). + vectors_tokenized (pandas.Series): Each row contains tokens for respective documents separated by spaces. + """ + self.tfidf_matrix = tf.fit_transform(vectors_tokenized)
+ +
[docs] def get_tokens(self): + """Return the tokens generated by the TF-IDF vectorizer. + + Returns: + dict: Dictionary of tokens generated by the TF-IDF vectorizer. + """ + try: + self.tokens = self.tf.vocabulary_ + except Exception: + self.tokens = "Run .tokenize_text() and .fit() first" + return self.tokens
+ +
[docs] def get_stop_words(self): + """Return the stop words excluded in the TF-IDF vectorizer. + + Returns: + frozenset: Stop words used by the TF-IDF vectorizer (can be converted to a list). + """ + try: + self.stop_words = self.tf.get_stop_words() + except Exception: + self.stop_words = "Run .tokenize_text() and .fit() first" + return self.stop_words
+ + def __create_full_recommendation_dictionary(self, df_clean): + """Create the full recommendation dictionary containing all recommendations for all items. + + Args: + df_clean (pandas.DataFrame): Dataframe with cleaned text. + """ + + # Similarity measure + cosine_sim = linear_kernel(self.tfidf_matrix, self.tfidf_matrix) + + # sorted_idx has the indices that would sort the array. + sorted_idx = np.argsort(cosine_sim, axis=1) + + data = list(df_clean[self.id_col].values) + len_df_clean = len(df_clean) + + results = {} + for idx, row in zip(range(0, len_df_clean), data): + similar_indices = sorted_idx[idx][: -(len_df_clean + 1) : -1] + similar_items = [(cosine_sim[idx][i], data[i]) for i in similar_indices] + results[row] = similar_items[1:] + + # Save to class + self.recommendations = results + + def __organize_results_as_tabular(self, df_clean, k): + """Restructures results dictionary into a table containing only the top k recommendations per item. + + Args: + df_clean (pandas.DataFrame): Dataframe with cleaned text. + k (int): Number of recommendations to return. + """ + # Initialize new dataframe to hold recommendation output + item_id = list() + rec_rank = list() + rec_score = list() + rec_item_id = list() + + # For each item + for _item_id in self.recommendations: + # Information about the item we are basing recommendations off of + rec_based_on = tmp_item_id = _item_id + + # Get all scores and IDs for items recommended for this current item + rec_array = self.recommendations.get(rec_based_on) + tmp_rec_score = list(map(lambda x: x[0], rec_array)) + tmp_rec_id = list(map(lambda x: x[1], rec_array)) + + # Append multiple values at a time to list + item_id.extend([tmp_item_id] * k) + rec_rank.extend(list(range(1, k + 1))) + rec_score.extend(tmp_rec_score[:k]) + rec_item_id.extend(tmp_rec_id[:k]) + + # Save the output + output_dict = { + self.id_col: item_id, + "rec_rank": rec_rank, + "rec_score": rec_score, + "rec_" + self.id_col: rec_item_id, + } + + # Convert to dataframe + self.top_k_recommendations = pd.DataFrame(output_dict) + 
[docs] def recommend_top_k_items(self, df_clean, k=5): + """Recommend k number of items similar to the item of interest. + + Args: + df_clean (pandas.DataFrame): Dataframe with cleaned text. + k (int): Number of recommendations to return. + + Returns: + pandas.DataFrame: Dataframe containing id of top k recommendations for all items. + """ + if k > len(df_clean) - 1: + raise ValueError( + "Cannot get more recommendations than there are items. Set k lower." + ) + self.__create_full_recommendation_dictionary(df_clean) + self.__organize_results_as_tabular(df_clean, k) + + return self.top_k_recommendations
+ + def __get_single_item_info(self, metadata, rec_id): + """Get full information for a single recommended item. + + Args: + metadata (pandas.DataFrame): Dataframe containing item info. + rec_id (str): Identifier for recommended item. + + Returns: + pandas.Series: Single row from dataframe containing recommended item info. + """ + + # Return row + rec_info = metadata.iloc[int(np.where(metadata[self.id_col] == rec_id)[0])] + + return rec_info + + def __make_clickable(self, address): + """Make URL clickable. + + Args: + address (str): URL address to make clickable. + """ + return '<a href="{0}">{0}</a>'.format(address) + +
[docs] def get_top_k_recommendations( + self, metadata, query_id, cols_to_keep=None, verbose=True + ): + """Return the top k recommendations with useful metadata for each recommendation. + + Args: + metadata (pandas.DataFrame): Dataframe holding metadata for all public domain papers. + query_id (str): ID of item of interest. + cols_to_keep (list of str): List of columns from the metadata dataframe to include + (e.g., ['title','authors','journal','publish_time','url']). + By default, all columns are kept. + verbose (boolean): Set to True if you want to print the table. + + Returns: + pandas.Styler: Stylized dataframe holding recommendations and associated metadata just for the item of interest (can access as normal dataframe by using df.data). + """ + # Copy to avoid mutating the caller's list (and avoid a mutable default argument) + cols_to_keep = list(cols_to_keep) if cols_to_keep else [] + + # Create subset of dataframe with just recommendations for the item of interest + df = self.top_k_recommendations.loc[ + self.top_k_recommendations[self.id_col] == query_id + ].reset_index() + + # Remove id_col of query item + df.drop([self.id_col], axis=1, inplace=True) + + # Add metadata for each recommended item (rec_<id_col>) + metadata_cols = metadata.columns.values + df[metadata_cols] = df.apply( + lambda row: self.__get_single_item_info( + metadata, row["rec_" + self.id_col] + ), + axis=1, + ) + + # Remove id col added from metadata (already present from self.top_k_recommendations) + df.drop([self.id_col], axis=1, inplace=True) + + # Rename columns such that rec_ is no longer appended, for simplicity + df = df.rename(columns={"rec_rank": "rank", "rec_score": "similarity_score"}) + + # Only keep columns of interest + if len(cols_to_keep) > 0: + # Insert our recommendation scoring/ranking columns + cols_to_keep.insert(0, "similarity_score") + cols_to_keep.insert(0, "rank") + df = df[cols_to_keep] + + # Make URLs clickable if they exist + if "url" in list(map(lambda x: x.lower(), metadata_cols)): + format_ = {"url": self.__make_clickable} + df = df.head().style.format(format_) + + if verbose: + print(df) + + return df
+
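A minimal usage sketch of the full TF-IDF pipeline above (the dataframe `df` and its "id"/"title"/"abstract" columns are hypothetical; the "nltk" method additionally assumes the NLTK punkt tokenizer data has been downloaded):

    recommender = TfidfRecommender(id_col="id", tokenization_method="nltk")
    clean = recommender.clean_dataframe(df, ["title", "abstract"], new_col_name="cleaned_text")
    tf, tokens = recommender.tokenize_text(clean, text_col="cleaned_text")
    recommender.fit(tf, tokens)
    top_k = recommender.recommend_top_k_items(clean, k=5)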
\ No newline at end of file diff --git a/_modules/recommenders/models/vae/multinomial_vae.html b/_modules/recommenders/models/vae/multinomial_vae.html new file mode 100644 index 0000000000..d81b0408fc --- /dev/null +++ b/_modules/recommenders/models/vae/multinomial_vae.html @@ -0,0 +1,919 @@ +recommenders.models.vae.multinomial_vae — Recommenders documentation
Source code for recommenders.models.vae.multinomial_vae

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import numpy as np
+import matplotlib.pyplot as plt
+import seaborn as sns
+from recommenders.evaluation.python_evaluation import ndcg_at_k
+
+import tensorflow as tf
+from tensorflow.keras.layers import *
+from tensorflow.keras.models import Model
+from tensorflow.keras import backend as K
+from tensorflow.keras.callbacks import ReduceLROnPlateau, Callback
+
+
+
[docs]class LossHistory(Callback): + """This class is used for saving the validation loss and the training loss per epoch.""" + +
[docs] def on_train_begin(self, logs={}): + """Initialise the lists where the loss of training and validation will be saved.""" + self.losses = [] + self.val_losses = []
+ +
[docs] def on_epoch_end(self, epoch, logs={}): + """Save the loss of training and validation set at the end of each epoch.""" + self.losses.append(logs.get("loss")) + self.val_losses.append(logs.get("val_loss"))
+ + +
[docs]class Metrics(Callback): + """Callback function used to calculate the NDCG@k metric of the validation set at the end of each epoch. + Weights of the model with the highest NDCG@k value are saved.""" + + def __init__(self, model, val_tr, val_te, mapper, k, save_path=None): + + """Initialize the class parameters. + + Args: + model: trained model for validation. + val_tr (numpy.ndarray, float): the click matrix for the validation set training part. + val_te (numpy.ndarray, float): the click matrix for the validation set testing part. + mapper (AffinityMatrix): the mapper for converting click matrix to dataframe. + k (int): number of top k items per user (optional). + save_path (str): Default path to save weights. + """ + # Model + self.model = model + + # Initial value of NDCG + self.best_ndcg = 0.0 + + # Validation data: training and testing parts + self.val_tr = val_tr + self.val_te = val_te + + # Mapper for converting from sparse matrix to dataframe + self.mapper = mapper + + # Top k items to recommend + self.k = k + + # Options to save the weights of the model for future use + self.save_path = save_path + 
[docs] def on_train_begin(self, logs={}): + """Initialise the list for validation NDCG@k.""" + self._data = []
+ +
[docs] def recommend_k_items(self, x, k, remove_seen=True): + """Returns the top-k items ordered by a relevancy score. + Obtained probabilities are used as recommendation score. + + Args: + x (numpy.ndarray, int32): input click matrix. + k (scalar, int32): the number of items to recommend. + + Returns: + numpy.ndarray: A sparse matrix containing the top_k elements ordered by their score. + + """ + # obtain scores + score = self.model.predict(x) + + if remove_seen: + # if true, it removes items from the train set by setting them to zero + seen_mask = np.not_equal(x, 0) + score[seen_mask] = 0 + + # get the top k items + top_items = np.argpartition(-score, range(k), axis=1)[:, :k] + + # get a copy of the score matrix + score_c = score.copy() + + # set the top k elements to zero in the copy + score_c[np.arange(score_c.shape[0])[:, None], top_items] = 0 + + # set to zero all elements other than the top k + top_scores = score - score_c + + return top_scores
+ +
[docs] def on_epoch_end(self, batch, logs={}): + """At the end of each epoch calculate NDCG@k of the validation set. + + If the model performance is improved, the model weights are saved. + Update the list of validation NDCG@k by adding obtained value + + """ + # recommend top k items based on training part of validation set + top_k = self.recommend_k_items(x=self.val_tr, k=self.k, remove_seen=True) + + # convert recommendations from sparse matrix to dataframe + top_k_df = self.mapper.map_back_sparse(top_k, kind="prediction") + test_df = self.mapper.map_back_sparse(self.val_te, kind="ratings") + + # calculate NDCG@k + NDCG = ndcg_at_k(test_df, top_k_df, col_prediction="prediction", k=self.k) + + # check if there is an improvement in NDCG, if so, update the weights of the saved model + if NDCG > self.best_ndcg: + self.best_ndcg = NDCG + + # save the weights of the optimal model + if self.save_path is not None: + self.model.save(self.save_path) + + self._data.append(NDCG)
+ +
[docs] def get_data(self): + """Returns a list of the NDCG@k of the validation set metrics calculated + at the end of each epoch.""" + return self._data
+ + +
[docs]class AnnealingCallback(Callback): + """This class is used for updating the value of β during the annealing process. + When β reaches the value of anneal_cap, it stops increasing.""" + + def __init__(self, beta, anneal_cap, total_anneal_steps): + + """Constructor + + Args: + beta (float): current value of beta. + anneal_cap (float): maximum value that beta can reach. + total_anneal_steps (int): total number of annealing steps. + """ + # maximum value that beta can take + self.anneal_cap = anneal_cap + + # initial value of beta + self.beta = beta + + # update_count used for calculating the updated value of beta + self.update_count = 0 + + # total annealing steps + self.total_anneal_steps = total_anneal_steps + +
[docs] def on_train_begin(self, logs={}): + """Initialise a list in which the beta value will be saved at the end of each epoch.""" + self._beta = []
+ +
[docs] def on_batch_end(self, epoch, logs={}): + """At the end of each batch, beta is updated until it reaches the value of anneal_cap.""" + self.update_count = self.update_count + 1 + + new_beta = min( + 1.0 * self.update_count / self.total_anneal_steps, self.anneal_cap + ) + + K.set_value(self.beta, new_beta)
+ +
[docs] def on_epoch_end(self, epoch, logs={}): + """At the end of each epoch save the value of beta in _beta list.""" + tmp = K.eval(self.beta) + self._beta.append(tmp)
+ +
[docs] def get_data(self): + """Returns a list of the beta values per epoch.""" + return self._beta
+ + +
[docs]class Mult_VAE: + """Multinomial Variational Autoencoders (Multi-VAE) for Collaborative Filtering implementation + + :Citation: + + Liang, Dawen, et al. "Variational autoencoders for collaborative filtering." + Proceedings of the 2018 World Wide Web Conference. 2018. + https://arxiv.org/pdf/1802.05814.pdf + """ + + def __init__( + self, + n_users, + original_dim, + intermediate_dim=200, + latent_dim=70, + n_epochs=400, + batch_size=100, + k=100, + verbose=1, + drop_encoder=0.5, + drop_decoder=0.5, + beta=1.0, + annealing=False, + anneal_cap=1.0, + seed=None, + save_path=None, + ): + + """Constructor + + Args: + n_users (int): Number of unique users in the train set. + original_dim (int): Number of unique items in the train set. + intermediate_dim (int): Dimension of intermediate space. + latent_dim (int): Dimension of latent space. + n_epochs (int): Number of epochs for training. + batch_size (int): Batch size. + k (int): number of top k items per user. + verbose (int): Whether to show the training output or not. + drop_encoder (float): Dropout percentage of the encoder. + drop_decoder (float): Dropout percentage of the decoder. + beta (float): a constant parameter β in the ELBO function, + when you are not using annealing (annealing=False) + annealing (bool): option of using annealing method for training the model (True) + or not using annealing, keeping a constant beta (False) + anneal_cap (float): maximum value that beta can take during annealing process. + seed (int): Seed. + save_path (str): Default path to save weights. + """ + # Seed + self.seed = seed + np.random.seed(self.seed) + + # Parameters + self.n_users = n_users + self.original_dim = original_dim + self.intermediate_dim = intermediate_dim + self.latent_dim = latent_dim + self.n_epochs = n_epochs + self.batch_size = batch_size + self.k = k + self.verbose = verbose + + # Compute samples per epoch + self.number_of_batches = self.n_users // self.batch_size + + # Annealing parameters + self.anneal_cap = anneal_cap + self.annealing = annealing + + if self.annealing: + self.beta = K.variable(0.0) + else: + self.beta = beta + + # Compute total annealing steps + self.total_anneal_steps = ( + self.number_of_batches + * (self.n_epochs - int(self.n_epochs * 0.2)) + // self.anneal_cap + ) + + # Dropout parameters + self.drop_encoder = drop_encoder + self.drop_decoder = drop_decoder + + # Path to save optimal model + self.save_path = save_path + + # Create the Mult_VAE model + self._create_model() + + def _create_model(self): + """Build and compile model.""" + # Encoding + self.x = Input(shape=(self.original_dim,)) + self.x_ = Lambda(lambda x: K.l2_normalize(x, axis=1))(self.x) + self.dropout_encoder = Dropout(self.drop_encoder)(self.x_) + + self.h = Dense( + self.intermediate_dim, + activation="tanh", + kernel_initializer=tf.compat.v1.keras.initializers.glorot_uniform( + seed=self.seed + ), + bias_initializer=tf.compat.v1.keras.initializers.truncated_normal( + stddev=0.001, seed=self.seed + ), + )(self.dropout_encoder) + self.z_mean = Dense(self.latent_dim)(self.h) + self.z_log_var = Dense(self.latent_dim)(self.h) + + # Sampling + self.z = Lambda(self._take_sample, output_shape=(self.latent_dim,))( + [self.z_mean, self.z_log_var] + ) + + # Decoding + self.h_decoder = Dense( + self.intermediate_dim, + activation="tanh", + kernel_initializer=tf.compat.v1.keras.initializers.glorot_uniform( + seed=self.seed + ), + bias_initializer=tf.compat.v1.keras.initializers.truncated_normal( + stddev=0.001, seed=self.seed + ), + ) + 
self.dropout_decoder = Dropout(self.drop_decoder) + self.x_bar = Dense(self.original_dim) + self.h_decoded = self.h_decoder(self.z) + self.h_decoded_ = self.dropout_decoder(self.h_decoded) + self.x_decoded = self.x_bar(self.h_decoded_) + + # Training + self.model = Model(self.x, self.x_decoded) + self.model.compile( + optimizer=tf.keras.optimizers.legacy.Adam(learning_rate=0.001), + loss=self._get_vae_loss, + ) + + def _get_vae_loss(self, x, x_bar): + """Calculate negative ELBO (NELBO).""" + log_softmax_var = tf.nn.log_softmax(x_bar) + self.neg_ll = -tf.reduce_mean( + input_tensor=tf.reduce_sum(input_tensor=log_softmax_var * x, axis=-1) + ) + a = tf.keras.backend.print_tensor(self.neg_ll) # noqa: F841 + # calculate the positive Kullback–Leibler divergence term + kl_loss = K.mean( + 0.5 + * K.sum( + -1 - self.z_log_var + K.square(self.z_mean) + K.exp(self.z_log_var), + axis=-1, + ) + ) + + # obtain negative ELBO + neg_ELBO = self.neg_ll + self.beta * kl_loss + + return neg_ELBO + + def _take_sample(self, args): + """Calculate the latent vector using the reparametrization trick: + sampling from N(_mean, _var) is the same as computing _mean + epsilon * sqrt(_var), + where epsilon ∼ N(0, I).""" + # _mean and _log_var calculated in encoder + _mean, _log_var = args + + # epsilon + epsilon = K.random_normal( + shape=(K.shape(_mean)[0], self.latent_dim), + mean=0.0, + stddev=1.0, + seed=self.seed, + ) + + return _mean + K.exp(_log_var / 2) * epsilon + 
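For reference, `_get_vae_loss` above computes the batch-averaged negative ELBO for a diagonal Gaussian posterior q(z|x) = N(mu, sigma^2 I) and a standard normal prior (B is the batch size, u indexes users, i items, j latent dimensions):

    \mathcal{L} = -\frac{1}{B}\sum_{u}\sum_{i} x_{ui}\,\log \mathrm{softmax}(\hat{x}_u)_i \;+\; \beta\,\frac{1}{B}\sum_{u}\frac{1}{2}\sum_{j}\left(-1-\log\sigma_{uj}^2+\mu_{uj}^2+\sigma_{uj}^2\right)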
[docs] def nn_batch_generator(self, x_train): + """Used for splitting dataset in batches. + + Args: + x_train (numpy.ndarray): The click matrix for the train set, with float values. + """ + # Shuffle the batch + np.random.seed(self.seed) + shuffle_index = np.arange(np.shape(x_train)[0]) + np.random.shuffle(shuffle_index) + x = x_train[shuffle_index, :] + y = x_train[shuffle_index, :] + + # Iterate until making a full epoch + counter = 0 + while 1: + index_batch = shuffle_index[ + self.batch_size * counter : self.batch_size * (counter + 1) + ] + # Decompress batch + x_batch = x[index_batch, :] + y_batch = y[index_batch, :] + counter += 1 + yield (np.array(x_batch), np.array(y_batch)) + + # Stopping rule + if counter >= self.number_of_batches: + counter = 0
+ +
[docs] def fit(self, x_train, x_valid, x_val_tr, x_val_te, mapper): + """Fit model with the train sets and validate on the validation set. + + Args: + x_train (numpy.ndarray): the click matrix for the train set. + x_valid (numpy.ndarray): the click matrix for the validation set. + x_val_tr (numpy.ndarray): the click matrix for the validation set training part. + x_val_te (numpy.ndarray): the click matrix for the validation set testing part. + mapper (object): the mapper for converting click matrix to dataframe. It can be AffinityMatrix. + """ + # initialise LossHistory used for saving loss of validation and train set per epoch + history = LossHistory() + + # initialise Metrics used for calculating NDCG@k per epoch + # and saving the model weights with the highest NDCG@k value + metrics = Metrics( + model=self.model, + val_tr=x_val_tr, + val_te=x_val_te, + mapper=mapper, + k=self.k, + save_path=self.save_path, + ) + + self.reduce_lr = ReduceLROnPlateau( + monitor="val_loss", factor=0.2, patience=1, min_lr=0.0001 + ) + + if self.annealing: + # initialise AnnealingCallback for annealing process + anneal = AnnealingCallback( + self.beta, self.anneal_cap, self.total_anneal_steps + ) + + # fit model + self.model.fit_generator( + generator=self.nn_batch_generator(x_train), + steps_per_epoch=self.number_of_batches, + epochs=self.n_epochs, + verbose=self.verbose, + callbacks=[metrics, history, self.reduce_lr, anneal], + validation_data=(x_valid, x_valid), + ) + + self.ls_beta = anneal.get_data() + + else: + self.model.fit_generator( + generator=self.nn_batch_generator(x_train), + steps_per_epoch=self.number_of_batches, + epochs=self.n_epochs, + verbose=self.verbose, + callbacks=[metrics, history, self.reduce_lr], + validation_data=(x_valid, x_valid), + ) + + # save lists + self.train_loss = history.losses + self.val_loss = history.val_losses + self.val_ndcg = metrics.get_data()
+ +
[docs] def get_optimal_beta(self): + """Returns the value of the optimal beta.""" + if self.annealing: + # find the epoch/index that had the highest NDCG@k value + index_max_ndcg = np.argmax(self.val_ndcg) + + # using this index find the value that beta had at this epoch + return self.ls_beta[index_max_ndcg] + else: + return self.beta
+ +
[docs] def display_metrics(self): + """Plots: + 1) Loss per epoch both for validation and train set + 2) NDCG@k per epoch of the validation set + """ + # Plot setup + plt.figure(figsize=(14, 5)) + sns.set(style="whitegrid") + + # Plot loss on the left graph + plt.subplot(1, 2, 1) + plt.plot(self.train_loss, color="b", linestyle="-", label="Train") + plt.plot(self.val_loss, color="r", linestyle="-", label="Val") + plt.title("\n") + plt.xlabel("Epochs", size=14) + plt.ylabel("Loss", size=14) + plt.legend(loc="upper left") + + # Plot NDCG on the right graph + plt.subplot(1, 2, 2) + plt.plot(self.val_ndcg, color="r", linestyle="-", label="Val") + plt.title("\n") + plt.xlabel("Epochs", size=14) + plt.ylabel("NDCG@k", size=14) + plt.legend(loc="upper left") + + # Add title + plt.suptitle("TRAINING AND VALIDATION METRICS HISTORY", size=16) + plt.tight_layout(pad=2)
+ +
[docs] def recommend_k_items(self, x, k, remove_seen=True): + """Returns the top-k items ordered by a relevancy score. + Obtained probabilities are used as recommendation score. + + Args: + x (numpy.ndarray, int32): input click matrix. + k (scalar, int32): the number of items to recommend. + + Returns: + numpy.ndarray, float: A sparse matrix containing the top_k elements ordered by their score. + """ + # return optimal model + self.model.load_weights(self.save_path) + + # obtain scores + score = self.model.predict(x) + + if remove_seen: + # if true, it removes items from the train set by setting them to zero + seen_mask = np.not_equal(x, 0) + score[seen_mask] = 0 + # get the top k items + top_items = np.argpartition(-score, range(k), axis=1)[:, :k] + # get a copy of the score matrix + score_c = score.copy() + # set the top k elements to zero in the copy + score_c[np.arange(score_c.shape[0])[:, None], top_items] = 0 + # set to zero all elements other than the top k + top_scores = score - score_c + return top_scores
+ +
[docs] def ndcg_per_epoch(self): + """Returns the list of NDCG@k at each epoch.""" + return self.val_ndcg
+
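A minimal training sketch for the class above (`x_train`, `x_valid`, `x_val_tr`, `x_val_te` are binary click matrices and `am` an AffinityMatrix-style mapper; all names and the weights path are hypothetical):

    model = Mult_VAE(
        n_users=x_train.shape[0],
        original_dim=x_train.shape[1],
        intermediate_dim=200,
        latent_dim=70,
        n_epochs=10,
        batch_size=100,
        k=10,
        annealing=True,
        anneal_cap=1.0,
        save_path="mvae_weights.hdf5",  # hypothetical path
    )
    model.fit(x_train, x_valid, x_val_tr, x_val_te, am)
    top_scores = model.recommend_k_items(x_val_tr, k=10, remove_seen=True)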
\ No newline at end of file diff --git a/_modules/recommenders/models/vae/standard_vae.html b/_modules/recommenders/models/vae/standard_vae.html new file mode 100644 index 0000000000..cfb24165e2 --- /dev/null +++ b/_modules/recommenders/models/vae/standard_vae.html @@ -0,0 +1,880 @@ +recommenders.models.vae.standard_vae — Recommenders documentation
Source code for recommenders.models.vae.standard_vae

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import numpy as np
+import matplotlib.pyplot as plt
+import seaborn as sns
+import tensorflow as tf
+from tensorflow.keras.layers import *
+from tensorflow.keras.models import Model
+from tensorflow.keras.losses import binary_crossentropy
+from tensorflow.keras import backend as K
+from tensorflow.keras.callbacks import ReduceLROnPlateau, Callback
+
+from recommenders.evaluation.python_evaluation import ndcg_at_k
+
+
+
[docs]class LossHistory(Callback): + """This class is used for saving the validation loss and the training loss per epoch.""" + +
[docs] def on_train_begin(self, logs={}): + """Initialise the lists where the loss of training and validation will be saved.""" + self.losses = [] + self.val_losses = []
+ +
[docs] def on_epoch_end(self, epoch, logs={}): + """Save the loss of training and validation set at the end of each epoch.""" + self.losses.append(logs.get("loss")) + self.val_losses.append(logs.get("val_loss"))
+ + +
[docs]class Metrics(Callback): + """Callback function used to calculate the NDCG@k metric of the validation set at the end of each epoch. + Weights of the model with the highest NDCG@k value are saved.""" + + def __init__(self, model, val_tr, val_te, mapper, k, save_path=None): + + """Initialize the class parameters. + + Args: + model: trained model for validation. + val_tr (numpy.ndarray, float): the click matrix for the validation set training part. + val_te (numpy.ndarray, float): the click matrix for the validation set testing part. + mapper (AffinityMatrix): the mapper for converting click matrix to dataframe. + k (int): number of top k items per user (optional). + save_path (str): Default path to save weights. + """ + # Model + self.model = model + + # Initial value of NDCG + self.best_ndcg = 0.0 + + # Validation data: training and testing parts + self.val_tr = val_tr + self.val_te = val_te + + # Mapper for converting from sparse matrix to dataframe + self.mapper = mapper + + # Top k items to recommend + self.k = k + + # Options to save the weights of the model for future use + self.save_path = save_path + 
[docs] def on_train_begin(self, logs={}): + """Initialise the list for validation NDCG@k.""" + self._data = []
+ +
[docs] def recommend_k_items(self, x, k, remove_seen=True): + """Returns the top-k items ordered by a relevancy score. + Obtained probabilities are used as recommendation score. + + Args: + x (numpy.ndarray, int32): input click matrix. + k (scalar, int32): the number of items to recommend. + + Returns: + numpy.ndarray: A sparse matrix containing the top_k elements ordered by their score. + + """ + # obtain scores + score = self.model.predict(x) + + if remove_seen: + # if true, it removes items from the train set by setting them to zero + seen_mask = np.not_equal(x, 0) + score[seen_mask] = 0 + + # get the top k items + top_items = np.argpartition(-score, range(k), axis=1)[:, :k] + + # get a copy of the score matrix + score_c = score.copy() + + # set the top k elements to zero in the copy + score_c[np.arange(score_c.shape[0])[:, None], top_items] = 0 + + # set to zero all elements other than the top k + top_scores = score - score_c + + return top_scores
+ +
[docs] def on_epoch_end(self, batch, logs={}): + """At the end of each epoch calculate NDCG@k of the validation set. + If the model performance is improved, the model weights are saved. + Update the list of validation NDCG@k by adding obtained value. + """ + # recommend top k items based on training part of validation set + top_k = self.recommend_k_items(x=self.val_tr, k=self.k, remove_seen=True) + + # convert recommendations from sparse matrix to dataframe + top_k_df = self.mapper.map_back_sparse(top_k, kind="prediction") + test_df = self.mapper.map_back_sparse(self.val_te, kind="ratings") + + # calculate NDCG@k + NDCG = ndcg_at_k(test_df, top_k_df, col_prediction="prediction", k=self.k) + + # check if there is an improvement in NDCG, if so, update the weights of the saved model + if NDCG > self.best_ndcg: + self.best_ndcg = NDCG + + # save the weights of the optimal model + if self.save_path is not None: + self.model.save(self.save_path) + + self._data.append(NDCG)
+ +
[docs] def get_data(self): + """Returns a list of the NDCG@k of the validation set metrics calculated + at the end of each epoch.""" + return self._data
+ + +
[docs]class AnnealingCallback(Callback): + """This class is used for updating the value of β during the annealing process. + When β reaches the value of anneal_cap, it stops increasing. + """ + + def __init__(self, beta, anneal_cap, total_anneal_steps): + + """Constructor + + Args: + beta (float): current value of beta. + anneal_cap (float): maximum value that beta can reach. + total_anneal_steps (int): total number of annealing steps. + """ + # maximum value that beta can take + self.anneal_cap = anneal_cap + + # initial value of beta + self.beta = beta + + # update_count used for calculating the updated value of beta + self.update_count = 0 + + # total annealing steps + self.total_anneal_steps = total_anneal_steps + +
[docs] def on_train_begin(self, logs={}): + """Initialise a list in which the beta value will be saved at the end of each epoch.""" + self._beta = []
+ +
[docs] def on_batch_end(self, epoch, logs={}): + """At the end of each batch, beta is updated until it reaches the value of anneal_cap.""" + self.update_count = self.update_count + 1 + + new_beta = min( + 1.0 * self.update_count / self.total_anneal_steps, self.anneal_cap + ) + + K.set_value(self.beta, new_beta)
+ +
[docs] def on_epoch_end(self, epoch, logs={}): + """At the end of each epoch save the value of beta in _beta list.""" + tmp = K.eval(self.beta) + self._beta.append(tmp)
+ +
[docs] def get_data(self): + """Returns a list of the beta values per epoch.""" + return self._beta
+ + +
[docs]class StandardVAE: + """Standard Variational Autoencoders (VAE) for Collaborative Filtering implementation.""" + + def __init__( + self, + n_users, + original_dim, + intermediate_dim=200, + latent_dim=70, + n_epochs=400, + batch_size=100, + k=100, + verbose=1, + drop_encoder=0.5, + drop_decoder=0.5, + beta=1.0, + annealing=False, + anneal_cap=1.0, + seed=None, + save_path=None, + ): + + """Initialize class parameters. + + Args: + n_users (int): Number of unique users in the train set. + original_dim (int): Number of unique items in the train set. + intermediate_dim (int): Dimension of intermediate space. + latent_dim (int): Dimension of latent space. + n_epochs (int): Number of epochs for training. + batch_size (int): Batch size. + k (int): number of top k items per user. + verbose (int): Whether to show the training output or not. + drop_encoder (float): Dropout percentage of the encoder. + drop_decoder (float): Dropout percentage of the decoder. + beta (float): a constant parameter β in the ELBO function, + when you are not using annealing (annealing=False) + annealing (bool): option of using annealing method for training the model (True) + or not using annealing, keeping a constant beta (False) + anneal_cap (float): maximum value that beta can take during annealing process. + seed (int): Seed. + save_path (str): Default path to save weights. + """ + # Seed + self.seed = seed + np.random.seed(self.seed) + + # Parameters + self.n_users = n_users + self.original_dim = original_dim + self.intermediate_dim = intermediate_dim + self.latent_dim = latent_dim + self.n_epochs = n_epochs + self.batch_size = batch_size + self.k = k + self.verbose = verbose + + # Compute samples per epoch + self.number_of_batches = self.n_users // self.batch_size + + # Annealing parameters + self.anneal_cap = anneal_cap + self.annealing = annealing + + if self.annealing: + self.beta = K.variable(0.0) + else: + self.beta = beta + + # Compute total annealing steps + self.total_anneal_steps = ( + self.number_of_batches * (self.n_epochs - int(self.n_epochs * 0.2)) + ) // self.anneal_cap + + # Dropout parameters + self.drop_encoder = drop_encoder + self.drop_decoder = drop_decoder + + # Path to save optimal model + self.save_path = save_path + + # Create StandardVAE model + self._create_model() + + def _create_model(self): + """Build and compile model.""" + # Encoding + self.x = Input(shape=(self.original_dim,)) + self.dropout_encoder = Dropout(self.drop_encoder)(self.x) + self.h = Dense(self.intermediate_dim, activation="tanh")(self.dropout_encoder) + self.z_mean = Dense(self.latent_dim)(self.h) + self.z_log_var = Dense(self.latent_dim)(self.h) + + # Sampling + self.z = Lambda(self._take_sample, output_shape=(self.latent_dim,))( + [self.z_mean, self.z_log_var] + ) + + # Decoding + self.h_decoder = Dense(self.intermediate_dim, activation="tanh") + self.dropout_decoder = Dropout(self.drop_decoder) + self.x_bar = Dense(self.original_dim, activation="softmax") + self.h_decoded = self.h_decoder(self.z) + self.h_decoded_ = self.dropout_decoder(self.h_decoded) + self.x_decoded = self.x_bar(self.h_decoded_) + + # Training + self.model = Model(self.x, self.x_decoded) + self.model.compile( + optimizer=tf.keras.optimizers.legacy.Adam(learning_rate=0.001), + loss=self._get_vae_loss, + ) + + def _get_vae_loss(self, x, x_bar): + """Calculate negative ELBO (NELBO).""" + # Reconstruction error: logistic log likelihood + reconst_loss = self.original_dim * binary_crossentropy(x, x_bar) + + # Kullback–Leibler divergence + kl_loss = 
0.5 * K.sum( + -1 - self.z_log_var + K.square(self.z_mean) + K.exp(self.z_log_var), axis=-1 + ) + + return reconst_loss + self.beta * kl_loss + + def _take_sample(self, args): + """Calculate the latent vector using the reparametrization trick: + sampling from N(_mean, _var) is the same as computing _mean + epsilon * sqrt(_var), + where epsilon ∼ N(0, I).""" + # sampling from latent dimension for decoder/generative part of network + _mean, _log_var = args + epsilon = K.random_normal( + shape=(K.shape(_mean)[0], self.latent_dim), + mean=0.0, + stddev=1.0, + seed=self.seed, + ) + + return _mean + K.exp(_log_var / 2) * epsilon + 
[docs] def nn_batch_generator(self, x_train): + """Used for splitting dataset in batches. + + Args: + x_train (numpy.ndarray): The click matrix for the train set with float values. + """ + # Shuffle the batch + np.random.seed(self.seed) + shuffle_index = np.arange(np.shape(x_train)[0]) + np.random.shuffle(shuffle_index) + x = x_train[shuffle_index, :] + y = x_train[shuffle_index, :] + + # Iterate until making a full epoch + counter = 0 + while 1: + index_batch = shuffle_index[ + self.batch_size * counter : self.batch_size * (counter + 1) + ] + # Decompress batch + x_batch = x[index_batch, :] + y_batch = y[index_batch, :] + counter += 1 + yield (np.array(x_batch), np.array(y_batch)) + + # Stopping rule + if counter >= self.number_of_batches: + counter = 0
+ +
[docs] def fit(self, x_train, x_valid, x_val_tr, x_val_te, mapper): + """Fit model with the train sets and validate on the validation set. + + Args: + x_train (numpy.ndarray): The click matrix for the train set. + x_valid (numpy.ndarray): The click matrix for the validation set. + x_val_tr (numpy.ndarray): The click matrix for the validation set training part. + x_val_te (numpy.ndarray): The click matrix for the validation set testing part. + mapper (object): The mapper for converting click matrix to dataframe. It can be AffinityMatrix. + """ + # initialise LossHistory used for saving loss of validation and train set per epoch + history = LossHistory() + + # initialise Metrics used for calculating NDCG@k per epoch + # and saving the model weights with the highest NDCG@k value + metrics = Metrics( + model=self.model, + val_tr=x_val_tr, + val_te=x_val_te, + mapper=mapper, + k=self.k, + save_path=self.save_path, + ) + + self.reduce_lr = ReduceLROnPlateau( + monitor="val_loss", factor=0.2, patience=1, min_lr=0.0001 + ) + + if self.annealing: + # initialise AnnealingCallback for annealing process + anneal = AnnealingCallback( + self.beta, self.anneal_cap, self.total_anneal_steps + ) + + # fit model + self.model.fit_generator( + generator=self.nn_batch_generator(x_train), + steps_per_epoch=self.number_of_batches, + epochs=self.n_epochs, + verbose=self.verbose, + callbacks=[metrics, history, self.reduce_lr, anneal], + validation_data=(x_valid, x_valid), + ) + + self.ls_beta = anneal.get_data() + + else: + self.model.fit_generator( + generator=self.nn_batch_generator(x_train), + steps_per_epoch=self.number_of_batches, + epochs=self.n_epochs, + verbose=self.verbose, + callbacks=[metrics, history, self.reduce_lr], + validation_data=(x_valid, x_valid), + ) + + # save lists + self.train_loss = history.losses + self.val_loss = history.val_losses + self.val_ndcg = metrics.get_data()
+ +
[docs] def get_optimal_beta(self): + """Returns the value of the optimal beta.""" + # find the epoch/index that had the highest NDCG@k value + index_max_ndcg = np.argmax(self.val_ndcg) + + # using this index find the value that beta had at this epoch + optimal_beta = self.ls_beta[index_max_ndcg] + + return optimal_beta
+ +
[docs] def display_metrics(self): + """Plots: + 1) Loss per epoch both for validation and train sets + 2) NDCG@k per epoch of the validation set + """ + # Plot setup + plt.figure(figsize=(14, 5)) + sns.set(style="whitegrid") + + # Plot loss on the left graph + plt.subplot(1, 2, 1) + plt.plot(self.train_loss, color="b", linestyle="-", label="Train") + plt.plot(self.val_loss, color="r", linestyle="-", label="Val") + plt.title("\n") + plt.xlabel("Epochs", size=14) + plt.ylabel("Loss", size=14) + plt.legend(loc="upper left") + + # Plot NDCG on the right graph + plt.subplot(1, 2, 2) + plt.plot(self.val_ndcg, color="r", linestyle="-", label="Val") + plt.title("\n") + plt.xlabel("Epochs", size=14) + plt.ylabel("NDCG@k", size=14) + plt.legend(loc="upper left") + + # Add title + plt.suptitle("TRAINING AND VALIDATION METRICS HISTORY", size=16) + plt.tight_layout(pad=2)
+ +
[docs] def recommend_k_items(self, x, k, remove_seen=True): + """Returns the top-k items ordered by a relevancy score. + + The predicted probabilities are used as the recommendation score. + + Args: + x (numpy.ndarray): Input click matrix, with `int32` values. + k (int): The number of items to recommend. + remove_seen (bool): Whether to remove items already seen in the train set from the recommendations. + + Returns: + numpy.ndarray: A matrix of the same shape as `x` that contains the scores of the top-k items and zeros elsewhere. + + """ + # restore the weights of the optimal model + self.model.load_weights(self.save_path) + + # obtain scores + score = self.model.predict(x) + if remove_seen: + # remove items from the train set by setting their scores to zero + seen_mask = np.not_equal(x, 0) + score[seen_mask] = 0 + # get the indices of the top k items per user + top_items = np.argpartition(-score, range(k), axis=1)[:, :k] + # zero out the top-k entries in a copy of the score matrix... + score_c = score.copy() + score_c[np.arange(score_c.shape[0])[:, None], top_items] = 0 + # ...so that the difference keeps only the top-k scores + top_scores = score - score_c + return top_scores
+ +
[docs] def ndcg_per_epoch(self): + """Returns the list of NDCG@k at each epoch.""" + + return self.val_ndcg
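A minimal usage sketch of the class above (all data names are illustrative: the click matrices and the `AffinityMatrix` mapper would come from the caller's own preprocessing):

.. code-block:: python

    # Hypothetical setup: 1000 users and 500 items in the train set.
    vae = StandardVAE(
        n_users=1000,
        original_dim=500,
        k=10,
        n_epochs=50,
        annealing=True,
        seed=42,
        save_path="svae_weights.hdf5",  # hypothetical path; needed to reload the best weights
    )
    vae.fit(x_train, x_valid, x_val_tr, x_val_te, mapper=affinity_matrix)
    top_scores = vae.recommend_k_items(x_val_tr, k=10, remove_seen=True)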
\ No newline at end of file
diff --git a/_modules/recommenders/models/vowpal_wabbit/vw.html b/_modules/recommenders/models/vowpal_wabbit/vw.html
new file mode 100644
index 0000000000..01dbfb937e
--- /dev/null
+++ b/_modules/recommenders/models/vowpal_wabbit/vw.html
@@ -0,0 +1,655 @@
Source code for recommenders.models.vowpal_wabbit.vw

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+"""
+This file provides a wrapper to run Vowpal Wabbit from the command line through python.
+It is not recommended to use this approach in production; there are Python bindings that can be installed from the
+repository or from pip, or the command line can be used directly. This is merely to demonstrate vw usage in the example notebooks.
+"""
+
+import os
+from subprocess import run
+from tempfile import TemporaryDirectory
+import pandas as pd
+
+from recommenders.utils.constants import (
+    DEFAULT_USER_COL,
+    DEFAULT_ITEM_COL,
+    DEFAULT_RATING_COL,
+    DEFAULT_TIMESTAMP_COL,
+    DEFAULT_PREDICTION_COL,
+)
+
+
+
[docs]class VW: + """Vowpal Wabbit Class""" + + def __init__( + self, + col_user=DEFAULT_USER_COL, + col_item=DEFAULT_ITEM_COL, + col_rating=DEFAULT_RATING_COL, + col_timestamp=DEFAULT_TIMESTAMP_COL, + col_prediction=DEFAULT_PREDICTION_COL, + **kwargs, + ): + """Initialize model parameters + + Args: + col_user (str): user column name + col_item (str): item column name + col_rating (str): rating column name + col_timestamp (str): timestamp column name + col_prediction (str): prediction column name + """ + + # create temporary files + self.tempdir = TemporaryDirectory() + self.train_file = os.path.join(self.tempdir.name, "train.dat") + self.test_file = os.path.join(self.tempdir.name, "test.dat") + self.model_file = os.path.join(self.tempdir.name, "vw.model") + self.prediction_file = os.path.join(self.tempdir.name, "prediction.dat") + + # set DataFrame columns + self.col_user = col_user + self.col_item = col_item + self.col_rating = col_rating + self.col_timestamp = col_timestamp + self.col_prediction = col_prediction + + self.logistic = "logistic" in kwargs.values() + self.train_cmd = self.parse_train_params(params=kwargs) + self.test_cmd = self.parse_test_params(params=kwargs) + +
[docs] @staticmethod + def to_vw_cmd(params): + """Convert dictionary of parameters to vw command line. + + Args: + params (dict): key = parameter, value = value (use True if parameter is just a flag) + + Returns: + list[str]: vw command line parameters as list of strings + """ + + cmd = ["vw"] + for k, v in params.items(): + if v is False: + # don't add parameters with a value == False + continue + + # add the correct hyphen to the parameter + cmd.append(f"-{k}" if len(k) == 1 else f"--{k}") + if v is not True: + # don't add an argument for parameters with value == True + cmd.append("{}".format(v)) + + return cmd
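As a concrete illustration of the flag handling above (single-character keys get one hyphen, `True` values become bare flags, `False` values are dropped):

.. code-block:: python

    VW.to_vw_cmd({"d": "train.dat", "quiet": True, "b": 26, "testonly": False})
    # ['vw', '-d', 'train.dat', '--quiet', '-b', '26']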
+ +
[docs] def parse_train_params(self, params): + """Parse input hyper-parameters to build vw train commands + + Args: + params (dict): key = parameter, value = value (use True if parameter is just a flag) + + Returns: + list[str]: vw command line parameters as list of strings + """ + + # make a copy of the original hyper parameters + train_params = params.copy() + + # remove options that are handled internally, not supported, or test only parameters + invalid = [ + "data", + "final_regressor", + "invert_hash", + "readable_model", + "t", + "testonly", + "i", + "initial_regressor", + "link", + ] + + for option in invalid: + if option in train_params: + del train_params[option] + + train_params.update( + { + "d": self.train_file, + "f": self.model_file, + "quiet": params.get("quiet", True), + } + ) + return self.to_vw_cmd(params=train_params)
+ +
[docs] def parse_test_params(self, params): + """Parse input hyper-parameters to build vw test commands + + Args: + params (dict): key = parameter, value = value (use True if parameter is just a flag) + + Returns: + list[str]: vw command line parameters as list of strings + """ + + # make a copy of the original hyper parameters + test_params = params.copy() + + # remove options that are handled internally, not supported, or train-only parameters + invalid = [ + "data", + "f", + "final_regressor", + "initial_regressor", + "test_only", + "invert_hash", + "readable_model", + "b", + "bit_precision", + "holdout_off", + "c", + "cache", + "k", + "kill_cache", + "l", + "learning_rate", + "l1", + "l2", + "initial_t", + "power_t", + "decay_learning_rate", + "q", + "quadratic", + "cubic", + "i", + "interactions", + "rank", + "lrq", + "lrqdropout", + "oaa", + ] + for option in invalid: + if option in test_params: + del test_params[option] + + test_params.update( + { + "d": self.test_file, + "i": self.model_file, + "quiet": params.get("quiet", True), + "p": self.prediction_file, + "t": True, + } + ) + return self.to_vw_cmd(params=test_params)
+ +
[docs] def to_vw_file(self, df, train=True): + """Convert Pandas DataFrame to vw input format file + + Args: + df (pandas.DataFrame): input DataFrame + train (bool): flag for train mode (or test mode if False) + """ + + output = self.train_file if train else self.test_file + with open(output, "w") as f: + # extract columns and create a new dataframe + tmp = df[[self.col_rating, self.col_user, self.col_item]].reset_index() + + if train: + # we need to reset the rating type to an integer to simplify the vw formatting + tmp[self.col_rating] = tmp[self.col_rating].astype("int64") + + # convert rating to binary value + if self.logistic: + max_value = tmp[self.col_rating].max() + tmp[self.col_rating] = tmp[self.col_rating].apply( + lambda x: 2 * round(x / max_value) - 1 + ) + else: + tmp[self.col_rating] = "" + + # convert each row to VW input format (https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Input-format) + # [label] [tag]|[user namespace] [user id feature] |[item namespace] [movie id feature] + # label is the true rating, tag is a unique id for the example just used to link predictions to truth + # user and item namespaces separate features to support interaction features through command line options + for _, row in tmp.iterrows(): + f.write( + "{rating} {index}|user {userID} |item {itemID}\n".format( + rating=row[self.col_rating], + index=row["index"], + userID=row[self.col_user], + itemID=row[self.col_item], + ) + )
+ +
[docs] def fit(self, df): + """Train model + + Args: + df (pandas.DataFrame): input training data + """ + + # write dataframe to disk in vw format + self.to_vw_file(df=df) + + # train model + run(self.train_cmd, check=True)
+ +
[docs] def predict(self, df): + """Predict results + + Args: + df (pandas.DataFrame): input test data + """ + + # write dataframe to disk in vw format + self.to_vw_file(df=df, train=False) + + # generate predictions + run(self.test_cmd, check=True) + + # read predictions + return df.join( + pd.read_csv( + self.prediction_file, + delim_whitespace=True, + names=[self.col_prediction], + index_col=1, + ) + )
+ + def __del__(self): + self.tempdir.cleanup()
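A minimal end-to-end sketch, assuming the `vw` binary is available on the PATH and using hypothetical DataFrames with the default column names:

.. code-block:: python

    import pandas as pd

    df_train = pd.DataFrame({"userID": [1, 1, 2], "itemID": [10, 11, 10], "rating": [4, 1, 5]})
    df_test = pd.DataFrame({"userID": [2], "itemID": [11], "rating": [3]})

    model = VW(loss_function="squared")   # extra kwargs are forwarded to the vw command line
    model.fit(df_train)                   # writes train.dat and trains vw.model in a temp dir
    predictions = model.predict(df_test)  # joins a 'prediction' column read back from vw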
\ No newline at end of file
diff --git a/_modules/recommenders/models/wide_deep/wide_deep_utils.html b/_modules/recommenders/models/wide_deep/wide_deep_utils.html
new file mode 100644
index 0000000000..b8a4d8acf0
--- /dev/null
+++ b/_modules/recommenders/models/wide_deep/wide_deep_utils.html
@@ -0,0 +1,602 @@
Source code for recommenders.models.wide_deep.wide_deep_utils

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import tensorflow as tf
+
+from recommenders.utils.constants import DEFAULT_USER_COL, DEFAULT_ITEM_COL
+from recommenders.utils.tf_utils import MODEL_DIR
+
+
+
[docs]def build_feature_columns( + users, + items, + user_col=DEFAULT_USER_COL, + item_col=DEFAULT_ITEM_COL, + item_feat_col=None, + crossed_feat_dim=1000, + user_dim=8, + item_dim=8, + item_feat_shape=None, + model_type="wide_deep", +): + """Build wide and/or deep feature columns for TensorFlow high-level API Estimator. + + Args: + users (iterable): Distinct user ids. + items (iterable): Distinct item ids. + user_col (str): User column name. + item_col (str): Item column name. + item_feat_col (str): Item feature column name for 'deep' or 'wide_deep' model. + crossed_feat_dim (int): Crossed feature dimension for 'wide' or 'wide_deep' model. + user_dim (int): User embedding dimension for 'deep' or 'wide_deep' model. + item_dim (int): Item embedding dimension for 'deep' or 'wide_deep' model. + item_feat_shape (int or an iterable of integers): Item feature array shape for 'deep' or 'wide_deep' model. + model_type (str): Model type, either + 'wide' for a linear model, + 'deep' for a deep neural network, or + 'wide_deep' for a combination of linear model and neural networks. + + Returns: + list, list: + - The wide feature columns + - The deep feature columns. If only the wide model is selected, the deep column list is empty and vice versa. + """ + if model_type not in ["wide", "deep", "wide_deep"]: + raise ValueError("Model type should be either 'wide', 'deep', or 'wide_deep'") + + user_ids = tf.feature_column.categorical_column_with_vocabulary_list( + user_col, users + ) + item_ids = tf.feature_column.categorical_column_with_vocabulary_list( + item_col, items + ) + + if model_type == "wide": + return _build_wide_columns(user_ids, item_ids, crossed_feat_dim), [] + elif model_type == "deep": + return ( + [], + _build_deep_columns( + user_ids, item_ids, user_dim, item_dim, item_feat_col, item_feat_shape + ), + ) + elif model_type == "wide_deep": + return ( + _build_wide_columns(user_ids, item_ids, crossed_feat_dim), + _build_deep_columns( + user_ids, item_ids, user_dim, item_dim, item_feat_col, item_feat_shape + ), + )
+ + +def _build_wide_columns(user_ids, item_ids, hash_bucket_size=1000): + """Build wide feature (crossed) columns. `user_ids` * `item_ids` are hashed into `hash_bucket_size` + + Args: + user_ids (tf.feature_column.categorical_column_with_vocabulary_list): User ids. + item_ids (tf.feature_column.categorical_column_with_vocabulary_list): Item ids. + hash_bucket_size (int): Hash bucket size. + + Returns: + list: Wide feature columns. + """ + # Including the original features in addition to the crossed one is recommended to address hash collision problem. + return [ + user_ids, + item_ids, + tf.feature_column.crossed_column( + [user_ids, item_ids], hash_bucket_size=hash_bucket_size + ), + ] + + +def _build_deep_columns( + user_ids, item_ids, user_dim, item_dim, item_feat_col=None, item_feat_shape=1 +): + """Build deep feature columns + + Args: + user_ids (tf.feature_column.categorical_column_with_vocabulary_list): User ids. + item_ids (tf.feature_column.categorical_column_with_vocabulary_list): Item ids. + user_dim (int): User embedding dimension. + item_dim (int): Item embedding dimension. + item_feat_col (str): Item feature column name. + item_feat_shape (int or an iterable of integers): Item feature array shape. + + Returns: + list: Deep feature columns. + """ + deep_columns = [ + # User embedding + tf.feature_column.embedding_column( + categorical_column=user_ids, dimension=user_dim, max_norm=user_dim**0.5 + ), + # Item embedding + tf.feature_column.embedding_column( + categorical_column=item_ids, dimension=item_dim, max_norm=item_dim**0.5 + ), + ] + # Item feature + if item_feat_col is not None: + deep_columns.append( + tf.feature_column.numeric_column( + item_feat_col, shape=item_feat_shape, dtype=tf.float32 + ) + ) + return deep_columns + + +
[docs]def build_model( + model_dir=MODEL_DIR, + wide_columns=(), + deep_columns=(), + linear_optimizer="Ftrl", + dnn_optimizer="Adagrad", + dnn_hidden_units=(128, 128), + dnn_dropout=0.0, + dnn_batch_norm=True, + log_every_n_iter=1000, + save_checkpoints_steps=10000, + seed=None, +): + """Build wide-deep model. + + To generate wide model, pass wide_columns only. + To generate deep model, pass deep_columns only. + To generate wide_deep model, pass both wide_columns and deep_columns. + + Args: + model_dir (str): Model checkpoint directory. + wide_columns (list of tf.feature_column): Wide model feature columns. + deep_columns (list of tf.feature_column): Deep model feature columns. + linear_optimizer (str or tf.train.Optimizer): Wide model optimizer name or object. + dnn_optimizer (str or tf.train.Optimizer): Deep model optimizer name or object. + dnn_hidden_units (list of int): Deep model hidden units. E.g., [10, 10, 10] is three layers of 10 nodes each. + dnn_dropout (float): Deep model's dropout rate. + dnn_batch_norm (bool): Deep model's batch normalization flag. + log_every_n_iter (int): Log the training loss for every n steps. + save_checkpoints_steps (int): Model checkpoint frequency. + seed (int): Random seed. + + Returns: + tf.estimator.Estimator: Model + """ + gpu_config = tf.compat.v1.ConfigProto() + gpu_config.gpu_options.allow_growth = True # dynamic memory allocation + + # TensorFlow training setup + config = tf.estimator.RunConfig( + tf_random_seed=seed, + log_step_count_steps=log_every_n_iter, + save_checkpoints_steps=save_checkpoints_steps, + session_config=gpu_config, + ) + + if len(wide_columns) > 0 and len(deep_columns) == 0: + model = tf.compat.v1.estimator.LinearRegressor( + model_dir=model_dir, + config=config, + feature_columns=wide_columns, + optimizer=linear_optimizer, + ) + elif len(wide_columns) == 0 and len(deep_columns) > 0: + model = tf.compat.v1.estimator.DNNRegressor( + model_dir=model_dir, + config=config, + feature_columns=deep_columns, + hidden_units=dnn_hidden_units, + optimizer=dnn_optimizer, + dropout=dnn_dropout, + batch_norm=dnn_batch_norm, + ) + elif len(wide_columns) > 0 and len(deep_columns) > 0: + model = tf.compat.v1.estimator.DNNLinearCombinedRegressor( + model_dir=model_dir, + config=config, + # wide settings + linear_feature_columns=wide_columns, + linear_optimizer=linear_optimizer, + # deep settings + dnn_feature_columns=deep_columns, + dnn_hidden_units=dnn_hidden_units, + dnn_optimizer=dnn_optimizer, + dnn_dropout=dnn_dropout, + batch_norm=dnn_batch_norm, + ) + else: + raise ValueError( + "To generate wide model, set wide_columns.\n" + "To generate deep model, set deep_columns.\n" + "To generate wide_deep model, set both wide_columns and deep_columns." + ) + + return model
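A minimal sketch of how the two helpers compose (ids and the checkpoint path are illustrative):

.. code-block:: python

    wide_cols, deep_cols = build_feature_columns(
        users=[1, 2, 3],
        items=[10, 11],
        model_type="wide_deep",
    )
    model = build_model(
        model_dir="./wd_checkpoints",  # hypothetical directory
        wide_columns=wide_cols,
        deep_columns=deep_cols,
        dnn_hidden_units=(64, 32),
        seed=42,
    )
    # model is a tf.estimator.DNNLinearCombinedRegressor; train with model.train(input_fn=...)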
\ No newline at end of file
diff --git a/_modules/recommenders/tuning/parameter_sweep.html b/_modules/recommenders/tuning/parameter_sweep.html
new file mode 100644
index 0000000000..4a847743c8
--- /dev/null
+++ b/_modules/recommenders/tuning/parameter_sweep.html
@@ -0,0 +1,445 @@
Source code for recommenders.tuning.parameter_sweep

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+#
+# Utility functions for parameter sweep.
+
+from itertools import product
+
+
+
[docs]def generate_param_grid(params): + """Generate a grid of parameter combinations. + Generate parameter lists from a parameter dictionary in the form of: + + .. code-block:: python + + { + "param1": [value1, value2], + "param2": [value1, value2] + } + + to: + + .. code-block:: python + + [ + {"param1": value1, "param2": value1}, + {"param1": value2, "param2": value1}, + {"param1": value1, "param2": value2}, + {"param1": value2, "param2": value2} + ] + + Args: + params (dict): Dictionary of parameters and values (in a list). Values that are + not lists are treated as fixed and copied into every combination. + + Returns: + list: A list of parameter dictionaries that can be fed directly into the + model builder as keyword arguments. + """ + param_new = {} + param_fixed = {} + + for key, value in params.items(): + if isinstance(value, list): + param_new[key] = value + else: + param_fixed[key] = value + + items = sorted(param_new.items()) + keys, values = zip(*items) + + params_exp = [] + for v in product(*values): + param_exp = dict(zip(keys, v)) + param_exp.update(param_fixed) + params_exp.append(param_exp) + + return params_exp
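For example, list-valued entries are expanded while scalar entries are carried through unchanged:

.. code-block:: python

    generate_param_grid({"lr": [0.01, 0.1], "rank": [8, 16], "epochs": 5})
    # [{'lr': 0.01, 'rank': 8, 'epochs': 5},
    #  {'lr': 0.01, 'rank': 16, 'epochs': 5},
    #  {'lr': 0.1, 'rank': 8, 'epochs': 5},
    #  {'lr': 0.1, 'rank': 16, 'epochs': 5}]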
\ No newline at end of file
diff --git a/_modules/recommenders/utils/general_utils.html b/_modules/recommenders/utils/general_utils.html
new file mode 100644
index 0000000000..3b2fbb02b7
--- /dev/null
+++ b/_modules/recommenders/utils/general_utils.html
@@ -0,0 +1,435 @@
Source code for recommenders.utils.general_utils

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import os
+import psutil
+
+
+
[docs]def invert_dictionary(dictionary): + """Invert a dictionary + + Note: + + If the dictionary has unique keys and unique values, the inversion is exact. If there are + repeated values, only one of the corresponding keys is kept (the last one encountered wins). + + Args: + dictionary (dict): A dictionary + + Returns: + dict: inverted dictionary + """ + return {v: k for k, v in dictionary.items()}
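For example, with a repeated value only the last matching key survives:

.. code-block:: python

    invert_dictionary({"a": 1, "b": 2})  # {1: 'a', 2: 'b'}
    invert_dictionary({"a": 1, "b": 1})  # {1: 'b'} -- the entry for 'a' is overwritten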
+ + +
[docs]def get_physical_memory(): + """Get the physical memory in GBs. + + Returns: + float: Physical memory in GBs. + """ + return psutil.virtual_memory()[0] / 1073741824
+ + +
[docs]def get_number_processors(): + """Get the number of processors in a CPU. + + Returns: + int: Number of processors. + """ + try: + num = os.cpu_count() + except Exception: + import multiprocessing # force exception in case multiprocessing is not installed + + num = multiprocessing.cpu_count() + return num
\ No newline at end of file
diff --git a/_modules/recommenders/utils/gpu_utils.html b/_modules/recommenders/utils/gpu_utils.html
new file mode 100644
index 0000000000..bbecdab7f0
--- /dev/null
+++ b/_modules/recommenders/utils/gpu_utils.html
@@ -0,0 +1,539 @@
Source code for recommenders.utils.gpu_utils

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import sys
+import os
+import glob
+import logging
+from numba import cuda
+from numba.cuda.cudadrv.error import CudaSupportError
+
+
+logger = logging.getLogger(__name__)
+
+
+DEFAULT_CUDA_PATH_LINUX = "/usr/local/cuda/version.txt"
+
+
+
[docs]def get_number_gpus(): + """Get the number of GPUs in the system. + Returns: + int: Number of GPUs. + """ + try: + import torch + + return torch.cuda.device_count() + except (ImportError, ModuleNotFoundError): + pass + try: + import numba + + return len(numba.cuda.gpus) + except Exception: # numba.cuda.cudadrv.error.CudaSupportError: + return 0
+ + +
[docs]def get_gpu_info(): + """Get information of GPUs. + + Returns: + list: List of gpu information dictionary as with `device_name`, `total_memory` (in Mb) and `free_memory` (in Mb). + Returns an empty list if there is no cuda device available. + """ + gpus = [] + try: + for gpu in cuda.gpus: + with gpu: + meminfo = cuda.current_context().get_memory_info() + g = { + "device_name": gpu.name.decode("ASCII"), + "total_memory": meminfo[1] / 1048576, # Mb + "free_memory": meminfo[0] / 1048576, # Mb + } + gpus.append(g) + except CudaSupportError: + pass + + return gpus
+ + +
[docs]def clear_memory_all_gpus(): + """Clear memory of all GPUs.""" + try: + for gpu in cuda.gpus: + with gpu: + cuda.current_context().deallocations.clear() + except CudaSupportError: + logger.info("No CUDA available")
+ + +
[docs]def get_cuda_version(): + """Get CUDA version + + Returns: + str: Version of the library. + """ + try: + import torch + + return torch.version.cuda + except (ImportError, ModuleNotFoundError): + path = "" + if sys.platform == "win32": + candidate = ( + "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v*\\version.txt" + ) + path_list = glob.glob(candidate) + if path_list: + path = path_list[0] + elif sys.platform == "linux" or sys.platform == "darwin": + path = "/usr/local/cuda/version.txt" + else: + raise ValueError("Not in Windows, Linux or Mac") + + if os.path.isfile(path): + with open(path, "r") as f: + data = f.read().replace("\n", "") + return data + else: + return None
+ + +
[docs]def get_cudnn_version(): + """Get the CuDNN version + + Returns: + str: Version of the library. + """ + + def find_cudnn_in_headers(candidates): + for c in candidates: + file = glob.glob(c) + if file: + break + if file: + with open(file[0], "r") as f: + version = "" + for line in f: + if "#define CUDNN_MAJOR" in line: + version = line.split()[-1] + if "#define CUDNN_MINOR" in line: + version += "." + line.split()[-1] + if "#define CUDNN_PATCHLEVEL" in line: + version += "." + line.split()[-1] + if version: + return version + else: + return None + else: + return None + + try: + import torch + + return str(torch.backends.cudnn.version()) + except (ImportError, ModuleNotFoundError): + if sys.platform == "win32": + candidates = [r"C:\NVIDIA\cuda\include\cudnn.h"] + elif sys.platform == "linux": + candidates = [ + "/usr/include/cudnn_version.h", + "/usr/include/x86_64-linux-gnu/cudnn_v[0-99].h", + "/usr/local/cuda/include/cudnn.h", + "/usr/include/cudnn.h", + ] + elif sys.platform == "darwin": + candidates = ["/usr/local/cuda/include/cudnn.h", "/usr/include/cudnn.h"] + else: + raise ValueError("Not in Windows, Linux or Mac") + return find_cudnn_in_headers(candidates)
\ No newline at end of file
diff --git a/_modules/recommenders/utils/k8s_utils.html b/_modules/recommenders/utils/k8s_utils.html
new file mode 100644
index 0000000000..b8cc09cc92
--- /dev/null
+++ b/_modules/recommenders/utils/k8s_utils.html
@@ -0,0 +1,470 @@
Source code for recommenders.utils.k8s_utils

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+from math import ceil, floor
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]def qps_to_replicas( + target_qps, processing_time, max_qp_replica=1, target_utilization=0.7 +): + """Provide a rough estimate of the number of replicas to support a given + load (queries per second) + + Args: + target_qps (int): target queries per second that you want to support + processing_time (float): the estimated amount of time (in seconds) + your service call takes + max_qp_replica (int): maximum number of concurrent queries per replica + target_utilization (float): proportion of CPU utilization you think is ideal + + Returns: + int: Number of estimated replicas required to support a target number of queries per second. + """ + concurrent_queries = target_qps * processing_time / target_utilization + replicas = ceil(concurrent_queries / max_qp_replica) + logger.info( + "Approximately {} replicas are estimated to support {} queries per second.".format( + replicas, target_qps + ) + ) + return replicas
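A worked example of the estimate: at 25 queries per second, 0.1 s per call and the default 0.7 target utilization, 25 * 0.1 / 0.7 ≈ 3.57 concurrent queries, which rounds up to 4 replicas:

.. code-block:: python

    qps_to_replicas(target_qps=25, processing_time=0.1)  # 4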
+ + +
[docs]def replicas_to_qps( + num_replicas, processing_time, max_qp_replica=1, target_utilization=0.7 +): + """Provide a rough estimate of the queries per second supported by a number of replicas + + Args: + num_replicas (int): number of replicas + processing_time (float): the estimated amount of time (in seconds) your service call takes + max_qp_replica (int): maximum number of concurrent queries per replica + target_utilization (float): proportion of CPU utilization you think is ideal + + Returns: + int: queries per second supported by the number of replicas + """ + qps = floor(num_replicas * max_qp_replica * target_utilization / processing_time) + logger.info( + "Approximately {} queries per second are supported by {} replicas.".format( + qps, num_replicas + ) + ) + return qps
+ + +
[docs]def nodes_to_replicas(n_cores_per_node, n_nodes=3, cpu_cores_per_replica=0.1): + """Provide a rough estimate of the number of replicas supported by a + given number of nodes with n_cores_per_node cores each + + Args: + n_cores_per_node (int): Total number of cores per node within an AKS + cluster that you want to use + n_nodes (int): Number of nodes (i.e. VMs) used in the AKS cluster + cpu_cores_per_replica (float): Cores assigned to each replica. This + can be fractional and corresponds to the + cpu_cores argument passed to AksWebservice.deploy_configuration() + + Returns: + int: Total number of replicas supported by the configuration + """ + n_cores_avail = (n_cores_per_node - 0.5) * n_nodes - 4.45 + replicas = floor(n_cores_avail / cpu_cores_per_replica) + logger.info( + "Approximately {} replicas are supported by {} nodes with {} cores each.".format( + replicas, n_nodes, n_cores_per_node + ) + ) + return replicas
\ No newline at end of file
diff --git a/_modules/recommenders/utils/notebook_memory_management.html b/_modules/recommenders/utils/notebook_memory_management.html
new file mode 100644
index 0000000000..ef2d83c5eb
--- /dev/null
+++ b/_modules/recommenders/utils/notebook_memory_management.html
@@ -0,0 +1,487 @@
Source code for recommenders.utils.notebook_memory_management

+# Original code: https://raw.githubusercontent.com/miguelgfierro/codebase/master/python/system/notebook_memory_management.py
+#
+# Profile memory usage envelope of IPython commands and report interactively.
+# Usage (inside a python notebook):
+#   from notebook_memory_management import start_watching_memory, stop_watching_memory
+# To start profile:
+#   start_watching_memory()
+# To stop profile:
+#   stop_watching_memory()
+#
+# Based on: https://github.com/ianozsvald/ipython_memory_usage
+#
+
+from __future__ import division  # 1/2 == 0.5, as in Py3
+from __future__ import absolute_import  # avoid hiding global modules with locals
+from __future__ import print_function  # force use of print("hello")
+from __future__ import (
+    unicode_literals,
+)  # force unadorned strings "" to be Unicode without prepending u""
+import time
+import memory_profiler
+from IPython import get_ipython
+import psutil
+import warnings
+
+
+# keep a global accounting for the last known memory usage
+# which is the reference point for the memory delta calculation
+previous_call_memory_usage = memory_profiler.memory_usage()[0]
+t1 = time.time()  # will be set to current time later
+keep_watching = True
+watching_memory = True
+try:
+    input_cells = get_ipython().user_ns["In"]
+except Exception:
+    warnings.warn("Not running on notebook")
+
+
+
[docs]def start_watching_memory(): + """Register memory profiling tools to IPython instance.""" + global watching_memory + watching_memory = True + ip = get_ipython() + ip.events.register("post_run_cell", watch_memory) + ip.events.register("pre_run_cell", pre_run_cell)
+ + +
[docs]def stop_watching_memory(): + """Unregister memory profiling tools from IPython instance.""" + global watching_memory + watching_memory = False + ip = get_ipython() + try: + ip.events.unregister("post_run_cell", watch_memory) + except ValueError: + print("ERROR: problem when unregistering") + pass + try: + ip.events.unregister("pre_run_cell", pre_run_cell) + except ValueError: + print("ERROR: problem when unregistering") + pass
+ + +
[docs]def watch_memory(): + """Bring in the global memory usage value from the previous iteration""" + global previous_call_memory_usage, keep_watching, watching_memory, input_cells + new_memory_usage = memory_profiler.memory_usage()[0] + memory_delta = new_memory_usage - previous_call_memory_usage + keep_watching = False + total_memory = psutil.virtual_memory()[0] / 1024 / 1024 # in Mb + # calculate time delta using global t1 (from the pre-run event) and current time + time_delta_secs = time.time() - t1 + num_commands = len(input_cells) - 1 + cmd = "In [{}]".format(num_commands) + # convert the results into a pretty string + output_template = ( + "{cmd} used {memory_delta:0.4f} Mb RAM in " + "{time_delta:0.2f}s, total RAM usage " + "{memory_usage:0.2f} Mb, total RAM " + "memory {total_memory:0.2f} Mb" + ) + output = output_template.format( + time_delta=time_delta_secs, + cmd=cmd, + memory_delta=memory_delta, + memory_usage=new_memory_usage, + total_memory=total_memory, + ) + if watching_memory: + print(str(output)) + previous_call_memory_usage = new_memory_usage
+ + +
[docs]def pre_run_cell(): + """Capture current time before we execute the current command""" + global t1 + t1 = time.time()
\ No newline at end of file
diff --git a/_modules/recommenders/utils/notebook_utils.html b/_modules/recommenders/utils/notebook_utils.html
new file mode 100644
index 0000000000..795f87da7f
--- /dev/null
+++ b/_modules/recommenders/utils/notebook_utils.html
@@ -0,0 +1,543 @@
Source code for recommenders.utils.notebook_utils

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import os
+import re
+import nbformat
+from nbconvert.preprocessors import ExecutePreprocessor
+from IPython.display import display
+
+
+NOTEBOOK_OUTPUT_CONTENT_TYPE = "application/notebook_utils.json+json"
+
+
+
[docs]def is_jupyter(): + """Check if the module is running on Jupyter notebook/console. + + Returns: + bool: True if the module is running on Jupyter notebook or Jupyter console, + False otherwise. + """ + try: + shell_name = get_ipython().__class__.__name__ + if shell_name == "ZMQInteractiveShell": + return True + else: + return False + except NameError: + return False
+ + +
[docs]def is_databricks(): + """Check if the module is running on Databricks. + + Returns: + bool: True if the module is running on Databricks notebook, + False otherwise. + """ + try: + if os.path.realpath(".") == "/databricks/driver": + return True + else: + return False + except NameError: + return False
+ + +def _update_parameters(parameter_cell_source, new_parameters): + """Replace parameter values in the cell source code.""" + modified_cell_source = parameter_cell_source + for param, new_value in new_parameters.items(): + if ( + isinstance(new_value, str) + and not (new_value.startswith('"') and new_value.endswith('"')) + and not (new_value.startswith("'") and new_value.endswith("'")) + ): + # Check if the new value is a string and surround it with quotes if necessary + new_value = f'"{new_value}"' + + # Define a regular expression pattern to match parameter assignments and ignore comments + pattern = re.compile(rf"(\b{param})\s*=\s*([^#\n]+)(?:#.*$)?", re.MULTILINE) + modified_cell_source = pattern.sub(rf"\1 = {new_value}", modified_cell_source) + + return modified_cell_source + + +
[docs]def execute_notebook( + input_notebook, output_notebook, parameters={}, kernel_name="python3", timeout=2200 +): + """Execute a notebook while passing parameters to it. + + Note: + Ensure your Jupyter Notebook is set up with parameters that can be + modified and read. Use Markdown cells to specify parameters that need + modification and code cells to set parameters that need to be read. + + Args: + input_notebook (str): Path to the input notebook. + output_notebook (str): Path to the output notebook + parameters (dict): Dictionary of parameters to pass to the notebook. + kernel_name (str): Kernel name. + timeout (int): Timeout (in seconds) for each cell to execute. + """ + + # Load the Jupyter Notebook + with open(input_notebook, "r") as notebook_file: + notebook_content = nbformat.read(notebook_file, as_version=4) + + # Search for and replace parameter values in code cells + for cell in notebook_content.cells: + if ( + "tags" in cell.metadata + and "parameters" in cell.metadata["tags"] + and cell.cell_type == "code" + ): + # Update the cell's source within notebook_content + cell.source = _update_parameters(cell.source, parameters) + + # Create an execution preprocessor + execute_preprocessor = ExecutePreprocessor(timeout=timeout, kernel_name=kernel_name) + + # Execute the notebook + executed_notebook, _ = execute_preprocessor.preprocess( + notebook_content, {"metadata": {"path": "./"}} + ) + + # Save the executed notebook + with open(output_notebook, "w", encoding="utf-8") as executed_notebook_file: + nbformat.write(executed_notebook, executed_notebook_file)
+ + +
[docs]def store_metadata(name, value): + """Store data in the notebook's output source code. + This function is similar to scrapbook.glue(). + + Args: + name (str): Name of the data. + value (int, float, str): Value of the data. + """ + + metadata = {"notebook_utils": {"name": name, "data": True, "display": False}} + data_json = { + "application/notebook_utils.json+json": { + "name": name, + "data": value, + "encoder": "json", + } + } + display(data_json, metadata=metadata, raw=True)
+ + +
[docs]def read_notebook(path): + """Read the metadata stored in the notebook's output source code. + This function is similar to scrapbook.read_notebook(). + + Args: + path (str): Path to the notebook. + + Returns: + dict: Dictionary of data stored in the notebook. + """ + # Load the Jupyter Notebook + with open(path, "r") as notebook_file: + notebook_content = nbformat.read(notebook_file, as_version=4) + + # Search for parameters and store them in a dictionary + results = {} + for cell in notebook_content.cells: + if cell.cell_type == "code" and "outputs" in cell: + for outputs in cell.outputs: + if "metadata" in outputs and "notebook_utils" in outputs.metadata: + name = outputs.data[NOTEBOOK_OUTPUT_CONTENT_TYPE]["name"] + data = outputs.data[NOTEBOOK_OUTPUT_CONTENT_TYPE]["data"] + results[name] = data + return results
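A sketch of the intended round trip, assuming `example.ipynb` is a notebook whose parameter cell is tagged `parameters` and which calls `store_metadata("ndcg", ...)` (file names and values here are illustrative):

.. code-block:: python

    execute_notebook(
        "example.ipynb",
        "output.ipynb",
        parameters={"TOP_K": 10, "MOVIELENS_DATA_SIZE": "100k"},
    )
    results = read_notebook("output.ipynb")  # e.g. {'ndcg': 0.123}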
\ No newline at end of file
diff --git a/_modules/recommenders/utils/plot.html b/_modules/recommenders/utils/plot.html
new file mode 100644
index 0000000000..60086f5e03
--- /dev/null
+++ b/_modules/recommenders/utils/plot.html
@@ -0,0 +1,468 @@
Source code for recommenders.utils.plot

+import matplotlib.pyplot as plt
+
+
+
[docs]def line_graph( + values, + labels, + x_guides=None, + x_name=None, + y_name=None, + x_min_max=None, + y_min_max=None, + legend_loc=None, + subplot=None, + plot_size=(5, 5), +): + """Plot line graph(s). + + Args: + values (list(list(float or tuple)) or list(float or tuple): List of graphs or a graph to plot + E.g. a graph = list(y) or list((y,x)) + labels (list(str) or str): List of labels or a label for graph. + If labels is a string, this function assumes the values is a single graph. + x_guides (list(int)): List of guidelines (a vertical dotted line) + x_name (str): x axis label + y_name (str): y axis label + x_min_max (list or tuple): Min and max value of the x axis + y_min_max (list or tuple): Min and max value of the y axis + legend_loc (str): legend location + subplot (list or tuple): `matplotlib.pyplot.subplot` format. E.g. to draw 1 x 2 subplot, + pass `(1,2,1)` for the first subplot and `(1,2,2)` for the second subplot. + plot_size (list or tuple): Plot size (width, height) + """ + if subplot: + # Setup figure only once + if subplot[2] == 1: + if plot_size: + plt.figure( + figsize=( + plot_size[0] + * subplot[1], # fig width = plot width * num columns + plot_size[1] + * subplot[0], # fig height = plot height * num rows + ) + ) + plt.subplots_adjust(wspace=0.5) + plt.subplot(*subplot) + else: + if plot_size: + plt.figure(figsize=plot_size) + + if isinstance(labels, str): + if isinstance(values[0], (int, float)): + y, x = values, range(len(values)) + else: + y, x = zip(*values) + plt.plot(x, y, label=labels, lw=1) + else: + assert len(values) == len(labels) + for i, v in enumerate(values): + if isinstance(v[0], (int, float)): + y, x = v, range(len(v)) + else: + y, x = zip(*v) + plt.plot(x, y, label=labels[i], lw=1) + + if x_guides: + for x in x_guides: + plt.axvline(x=x, color="gray", lw=1, linestyle="--") + + if x_name: + plt.xlabel(x_name) + if y_name: + plt.ylabel(y_name) + if x_min_max: + plt.xlim(*x_min_max) + if y_min_max: + plt.ylim(*y_min_max) + if legend_loc: + plt.legend(loc=legend_loc)
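A minimal sketch plotting two hypothetical loss curves on one graph:

.. code-block:: python

    train_loss = [1.0, 0.6, 0.4, 0.3]
    val_loss = [1.1, 0.8, 0.7, 0.7]

    line_graph(
        values=[train_loss, val_loss],
        labels=["train", "validation"],
        x_name="epoch",
        y_name="loss",
        legend_loc="upper right",
    )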
\ No newline at end of file
diff --git a/_modules/recommenders/utils/python_utils.html b/_modules/recommenders/utils/python_utils.html
new file mode 100644
index 0000000000..89d48f129f
--- /dev/null
+++ b/_modules/recommenders/utils/python_utils.html
@@ -0,0 +1,642 @@
Source code for recommenders.utils.python_utils

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import logging
+import numpy as np
+from scipy import sparse
+
+
+logger = logging.getLogger()
+
+
+
[docs]def exponential_decay(value, max_val, half_life): + """Compute decay factor for a given value based on an exponential decay. + + Values greater than `max_val` will be set to 1. + + Args: + value (numeric): Value to calculate decay factor + max_val (numeric): Value at which decay factor will be 1 + half_life (numeric): Value at which decay factor will be 0.5 + + Returns: + float: Decay factor + """ + return np.minimum(1.0, np.power(0.5, (max_val - value) / half_life))
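For instance, a value one half-life below `max_val` decays to 0.5, and values at or above `max_val` saturate at 1:

.. code-block:: python

    exponential_decay(value=3, max_val=5, half_life=2)  # 0.5 ** ((5 - 3) / 2) = 0.5
    exponential_decay(value=7, max_val=5, half_life=2)  # capped at 1.0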
+ + +def _get_row_and_column_matrix(array): + """Helper method to get the row and column matrix from an array. + + Args: + array (numpy.ndarray): the array from which to get the row and column matrix. + + Returns: + (numpy.ndarray, numpy.ndarray): (row matrix, column matrix) + """ + row_matrix = np.expand_dims(array, axis=0) + column_matrix = np.expand_dims(array, axis=1) + return row_matrix, column_matrix + + +
[docs]def jaccard(cooccurrence): + """Helper method to calculate the Jaccard similarity of a matrix of + co-occurrences. When comparing Jaccard with count co-occurrence + and lift similarity, count favours predictability, meaning that + the most popular items will be recommended most of the time. Lift, + by contrast, favours discoverability/serendipity, meaning that an + item that is less popular overall but highly favoured by a small + subset of users is more likely to be recommended. Jaccard is a + compromise between the two. + + Args: + cooccurrence (numpy.ndarray): the symmetric matrix of co-occurrences of items. + + Returns: + numpy.ndarray: The matrix of Jaccard similarities between any two items. + + """ + + diag_rows, diag_cols = _get_row_and_column_matrix(cooccurrence.diagonal()) + + with np.errstate(invalid="ignore", divide="ignore"): + result = cooccurrence / (diag_rows + diag_cols - cooccurrence) + + return np.array(result)
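For a toy co-occurrence matrix where item 0 occurs twice, item 1 three times, and the two co-occur once, the off-diagonal Jaccard is 1 / (2 + 3 - 1) = 0.25:

.. code-block:: python

    import numpy as np

    co = np.array([[2, 1], [1, 3]])
    jaccard(co)
    # array([[1.  , 0.25],
    #        [0.25, 1.  ]])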
+ + +
[docs]def lift(cooccurrence): + """Helper method to calculate the Lift of a matrix of + co-occurrences. In comparison with basic co-occurrence and Jaccard + similarity, lift favours discoverability and serendipity, as + opposed to co-occurrence that favours the most popular items, and + Jaccard that is a compromise between the two. + + Args: + cooccurrence (numpy.ndarray): The symmetric matrix of co-occurrences of items. + + Returns: + numpy.ndarray: The matrix of Lifts between any two items. + + """ + + diag_rows, diag_cols = _get_row_and_column_matrix(cooccurrence.diagonal()) + + with np.errstate(invalid="ignore", divide="ignore"): + result = cooccurrence / (diag_rows * diag_cols) + + return np.array(result)
+ + +
[docs]def mutual_information(cooccurrence): + """Helper method to calculate the Mutual Information of a matrix of + co-occurrences. + + Mutual information is a measurement of the amount of information + explained by the i-th j-th item column vector. + + Args: + cooccurrence (numpy.ndarray): The symmetric matrix of co-occurrences of items. + + Returns: + numpy.ndarray: The matrix of mutual information between any two items. + + """ + + with np.errstate(invalid="ignore", divide="ignore"): + result = np.log2(cooccurrence.shape[0] * lift(cooccurrence)) + + return np.array(result)
+ + +
[docs]def lexicographers_mutual_information(cooccurrence): + """Helper method to calculate the Lexicographers Mutual Information of + a matrix of co-occurrences. + + Due to the bias of mutual information for low frequency items, + lexicographers mutual information corrects the formula by + multiplying it by the co-occurrence frequency. + + Args: + cooccurrence (numpy.ndarray): The symmetric matrix of co-occurrences of items. + + Returns: + numpy.ndarray: The matrix of lexicographers mutual information between any two items. + + """ + + with np.errstate(invalid="ignore", divide="ignore"): + result = cooccurrence * mutual_information(cooccurrence) + + return np.array(result)
+ + +
[docs]def cosine_similarity(cooccurrence): + """Helper method to calculate the Cosine similarity of a matrix of + co-occurrences. + + Cosine similarity can be interpreted as the angle between the i-th + and j-th item. + + Args: + cooccurrence (numpy.ndarray): The symmetric matrix of co-occurrences of items. + + Returns: + numpy.ndarray: The matrix of cosine similarity between any two items. + + """ + + diag_rows, diag_cols = _get_row_and_column_matrix(cooccurrence.diagonal()) + + with np.errstate(invalid="ignore", divide="ignore"): + result = cooccurrence / np.sqrt(diag_rows * diag_cols) + + return np.array(result)
+ + +
[docs]def inclusion_index(cooccurrence): + """Helper method to calculate the Inclusion Index of a matrix of + co-occurrences. + + Inclusion index measures the overlap between items. + + Args: + cooccurrence (numpy.ndarray): The symmetric matrix of co-occurrences of items. + + Returns: + numpy.ndarray: The matrix of inclusion index between any two items. + + """ + + diag_rows, diag_cols = _get_row_and_column_matrix(cooccurrence.diagonal()) + + with np.errstate(invalid="ignore", divide="ignore"): + result = cooccurrence / np.minimum(diag_rows, diag_cols) + + return np.array(result)
+ + +
[docs]def get_top_k_scored_items(scores, top_k, sort_top_k=False): + """Extract top K items from a matrix of scores for each user-item pair, optionally sort results per user. + + Args: + scores (numpy.ndarray): Score matrix (users x items). + top_k (int): Number of top items to recommend. + sort_top_k (bool): Flag to sort top k results. + + Returns: + numpy.ndarray, numpy.ndarray: + - Indices into score matrix for each user's top items. + - Scores corresponding to top items. + + """ + + # ensure we're working with a dense ndarray + if isinstance(scores, sparse.spmatrix): + scores = scores.todense() + + if scores.shape[1] < top_k: + logger.warning( + "Number of items is less than top_k, limiting top_k to number of items" + ) + k = min(top_k, scores.shape[1]) + + test_user_idx = np.arange(scores.shape[0])[:, None] + + # get top K items and scores + # this determines the un-ordered top-k item indices for each user + top_items = np.argpartition(scores, -k, axis=1)[:, -k:] + top_scores = scores[test_user_idx, top_items] + + if sort_top_k: + sort_ind = np.argsort(-top_scores) + top_items = top_items[test_user_idx, sort_ind] + top_scores = top_scores[test_user_idx, sort_ind] + + return np.array(top_items), np.array(top_scores)
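For example, with two users, three items, and `sort_top_k=True`:

.. code-block:: python

    import numpy as np

    scores = np.array([[1.0, 3.0, 2.0], [5.0, 4.0, 6.0]])
    items, top_scores = get_top_k_scored_items(scores, top_k=2, sort_top_k=True)
    # items      -> array([[1, 2], [2, 0]])
    # top_scores -> array([[3., 2.], [6., 5.]])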
+ + +
[docs]def binarize(a, threshold): + """Binarize the values. + + Args: + a (numpy.ndarray): Input array that needs to be binarized. + threshold (float): Values strictly greater than this threshold are set to 1, all others to 0. + + Returns: + numpy.ndarray: Binarized array. + """ + return np.where(a > threshold, 1.0, 0.0)
+ + +
[docs]def rescale(data, new_min=0, new_max=1, data_min=None, data_max=None): + """Rescale/normalize the data to be within the range `[new_min, new_max]` + If data_min and data_max are explicitly provided, they will be used + as the old min/max values instead of taken from the data. + + Note: + This is the same as `sklearn.preprocessing.MinMaxScaler`, with the exception that we can + override the min/max of the old scale. + + Args: + data (numpy.ndarray): 1d scores vector or 2d score matrix (users x items). + new_min (int|float): The minimum of the newly scaled data. + new_max (int|float): The maximum of the newly scaled data. + data_min (None|number): The minimum of the passed data [if omitted it will be inferred]. + data_max (None|number): The maximum of the passed data [if omitted it will be inferred]. + + Returns: + numpy.ndarray: The newly scaled/normalized data. + """ + data_min = data.min() if data_min is None else data_min + data_max = data.max() if data_max is None else data_max + return (data - data_min) / (data_max - data_min) * (new_max - new_min) + new_min
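Two worked examples, one inferring the old scale from the data and one overriding it:

.. code-block:: python

    import numpy as np

    rescale(np.array([1.0, 2.0, 3.0]))                          # array([0. , 0.5, 1. ])
    rescale(np.array([1.0, 2.0, 3.0]), data_min=0, data_max=4)  # array([0.25, 0.5 , 0.75])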
\ No newline at end of file
diff --git a/_modules/recommenders/utils/spark_utils.html b/_modules/recommenders/utils/spark_utils.html
new file mode 100644
index 0000000000..91bcd802c4
--- /dev/null
+++ b/_modules/recommenders/utils/spark_utils.html
@@ -0,0 +1,462 @@
Source code for recommenders.utils.spark_utils

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import os
+
+
+try:
+    from pyspark.sql import SparkSession  # noqa: F401
+except ImportError:
+    pass  # skip this import if we are in pure python environment
+
+MMLSPARK_PACKAGE = "com.microsoft.azure:synapseml_2.12:0.9.5"
+MMLSPARK_REPO = "https://mmlspark.azureedge.net/maven"
+# We support Spark v3, but in case you wish to use v2, set
+# MMLSPARK_PACKAGE = "com.microsoft.ml.spark:mmlspark_2.11:0.18.1"
+# MMLSPARK_REPO = "https://mvnrepository.com/artifact"
+
+
+
[docs]def start_or_get_spark( + app_name="Sample", + url="local[*]", + memory="10g", + config=None, + packages=None, + jars=None, + repositories=None, +): + """Start Spark if not started + + Args: + app_name (str): set name of the application + url (str): URL for spark master + memory (str): size of memory for spark driver. This will be ignored if spark.driver.memory is set in config. + config (dict): dictionary of configuration options + packages (list): list of packages to install + jars (list): list of jar files to add + repositories (list): list of maven repositories + + Returns: + pyspark.sql.SparkSession: Spark session. + """ + + submit_args = "" + if packages is not None: + submit_args = "--packages {} ".format(",".join(packages)) + if jars is not None: + submit_args += "--jars {} ".format(",".join(jars)) + if repositories is not None: + submit_args += "--repositories {}".format(",".join(repositories)) + if submit_args: + os.environ["PYSPARK_SUBMIT_ARGS"] = "{} pyspark-shell".format(submit_args) + + spark_opts = [ + 'SparkSession.builder.appName("{}")'.format(app_name), + 'master("{}")'.format(url), + ] + + if config is not None: + for key, raw_value in config.items(): + value = ( + '"{}"'.format(raw_value) if isinstance(raw_value, str) else raw_value + ) + spark_opts.append('config("{key}", {value})'.format(key=key, value=value)) + + if config is None or "spark.driver.memory" not in config: + spark_opts.append('config("spark.driver.memory", "{}")'.format(memory)) + + # Set larger stack size + spark_opts.append('config("spark.executor.extraJavaOptions", "-Xss4m")') + spark_opts.append('config("spark.driver.extraJavaOptions", "-Xss4m")') + + spark_opts.append("getOrCreate()") + return eval(".".join(spark_opts))
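A minimal usage sketch, for example starting a local session with more driver memory and an extra configuration option:

.. code-block:: python

    spark = start_or_get_spark(
        app_name="recommenders-test",
        memory="16g",
        config={"spark.sql.shuffle.partitions": "8"},
    )
    print(spark.version)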
\ No newline at end of file
diff --git a/_modules/recommenders/utils/tf_utils.html b/_modules/recommenders/utils/tf_utils.html
new file mode 100644
index 0000000000..e8e88dafea
--- /dev/null
+++ b/_modules/recommenders/utils/tf_utils.html
@@ -0,0 +1,748 @@
Source code for recommenders.utils.tf_utils

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+import itertools
+import numpy as np
+import tensorflow as tf
+from tensorflow_estimator.python.estimator.export.export import (
+    build_supervised_input_receiver_fn_from_input_fn,
+)
+
+MODEL_DIR = "model_checkpoints"
+
+
+OPTIMIZERS = dict(
+    adadelta=tf.compat.v1.train.AdadeltaOptimizer,
+    adagrad=tf.compat.v1.train.AdagradOptimizer,
+    adam=tf.compat.v1.train.AdamOptimizer,
+    ftrl=tf.compat.v1.train.FtrlOptimizer,
+    momentum=tf.compat.v1.train.MomentumOptimizer,
+    rmsprop=tf.compat.v1.train.RMSPropOptimizer,
+    sgd=tf.compat.v1.train.GradientDescentOptimizer,
+)
+
+
+
[docs]def pandas_input_fn_for_saved_model(df, feat_name_type): + """Pandas input function for TensorFlow SavedModel. + + Args: + df (pandas.DataFrame): Data containing features. + feat_name_type (dict): Feature name and type spec. E.g. + `{'userID': int, 'itemID': int, 'rating': float}` + + Returns: + func: Input function + + """ + for feat_type in feat_name_type.values(): + assert feat_type in (int, float, list) + + def input_fn(): + examples = [None] * len(df) + for i, sample in df.iterrows(): + ex = tf.train.Example() + for feat_name, feat_type in feat_name_type.items(): + feat = ex.features.feature[feat_name] + if feat_type == int: + feat.int64_list.value.extend([sample[feat_name]]) + elif feat_type == float: + feat.float_list.value.extend([sample[feat_name]]) + elif feat_type == list: + feat.float_list.value.extend(sample[feat_name]) + examples[i] = ex.SerializeToString() + return {"inputs": tf.constant(examples)} + + return input_fn
+ + +
[docs]def pandas_input_fn( + df, y_col=None, batch_size=128, num_epochs=1, shuffle=False, seed=None +): + """Pandas input function for TensorFlow high-level API Estimator. + This function returns a function that, when called, builds a `tf.data.Dataset`. + + Note: + `tf.estimator.inputs.pandas_input_fn` cannot handle array/list column properly. + For more information, see https://www.tensorflow.org/api_docs/python/tf/estimator/inputs/numpy_input_fn + + Args: + df (pandas.DataFrame): Data containing features. + y_col (str): Label column name if df has it. + batch_size (int): Batch size for the input function. + num_epochs (int): Number of epochs to iterate over data. If `None`, it will run forever. + shuffle (bool): If True, shuffles the data queue. + seed (int): Random seed for shuffle. + + Returns: + function: Input function that returns a `tf.data.Dataset` of features (and labels, if `y_col` is given). + """ + + X_df = df.copy() + if y_col is not None: + y = X_df.pop(y_col).values + else: + y = None + + X = {} + for col in X_df.columns: + values = X_df[col].values + if isinstance(values[0], (list, np.ndarray)): + values = np.array(values.tolist(), dtype=np.float32) + X[col] = values + + return lambda: _dataset( + x=X, + y=y, + batch_size=batch_size, + num_epochs=num_epochs, + shuffle=shuffle, + seed=seed, + )
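A sketch of feeding a DataFrame to an Estimator through the function above (the column names are just the defaults used elsewhere in this repo):

.. code-block:: python

    import pandas as pd

    df = pd.DataFrame(
        {"userID": [1, 2, 3], "itemID": [10, 11, 12], "rating": [4.0, 3.0, 5.0]}
    )
    train_fn = pandas_input_fn(df, y_col="rating", batch_size=2, num_epochs=10, shuffle=True, seed=42)
    # model.train(input_fn=train_fn)  # where model is a tf.estimator.Estimator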
+
+
+def _dataset(x, y=None, batch_size=128, num_epochs=1, shuffle=False, seed=None):
+    if y is None:
+        dataset = tf.data.Dataset.from_tensor_slices(x)
+    else:
+        dataset = tf.data.Dataset.from_tensor_slices((x, y))
+
+    if shuffle:
+        dataset = dataset.shuffle(
+            1000, seed=seed, reshuffle_each_iteration=True  # buffer size = 1000
+        )
+    elif seed is not None:
+        import warnings
+
+        warnings.warn("Seed was set but `shuffle=False`. Seed will be ignored.")
+
+    return dataset.repeat(num_epochs).batch(batch_size)
+
+
+def build_optimizer(name, lr=0.001, **kwargs):
+    """Get an optimizer for TensorFlow high-level API Estimator.
+
+    Available options are: `adadelta`, `adagrad`, `adam`, `ftrl`, `momentum`, `rmsprop` or `sgd`.
+
+    Args:
+        name (str): Optimizer name.
+        lr (float): Learning rate.
+        kwargs: Optimizer arguments as key-value pairs.
+
+    Returns:
+        tf.train.Optimizer: TensorFlow optimizer.
+    """
+    name = name.lower()
+
+    try:
+        optimizer_class = OPTIMIZERS[name]
+    except KeyError:
+        raise KeyError("Optimizer name should be one of: {}".format(list(OPTIMIZERS)))
+
+    # Set optimizer-specific parameters
+    params = {}
+    if name == "ftrl":
+        params["l1_regularization_strength"] = kwargs.get(
+            "l1_regularization_strength", 0.0
+        )
+        params["l2_regularization_strength"] = kwargs.get(
+            "l2_regularization_strength", 0.0
+        )
+    elif name == "momentum" or name == "rmsprop":
+        params["momentum"] = kwargs.get("momentum", 0.0)
+
+    return optimizer_class(learning_rate=lr, **params)
+
+
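For example (learning rates and momentum values are illustrative):

# Momentum and RMSProp accept a `momentum` keyword; FTRL accepts the two
# regularization strengths; other keyword arguments are ignored.
opt = build_optimizer("momentum", lr=0.01, momentum=0.9)
sgd = build_optimizer("sgd", lr=0.1)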
+def export_model(model, train_input_fn, eval_input_fn, tf_feat_cols, base_dir):
+    """Export TensorFlow estimator (model).
+
+    Args:
+        model (tf.estimator.Estimator): Model to export.
+        train_input_fn (function): Training input function to create data receiver spec.
+        eval_input_fn (function): Evaluation input function to create data receiver spec.
+        tf_feat_cols (list(tf.feature_column)): Feature columns.
+        base_dir (str): Base directory to export the model.
+
+    Returns:
+        str: Exported model path.
+    """
+    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
+    train_rcvr_fn = build_supervised_input_receiver_fn_from_input_fn(train_input_fn)
+    eval_rcvr_fn = build_supervised_input_receiver_fn_from_input_fn(eval_input_fn)
+    serve_rcvr_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(
+        tf.feature_column.make_parse_example_spec(tf_feat_cols)
+    )
+    rcvr_fn_map = {
+        tf.estimator.ModeKeys.TRAIN: train_rcvr_fn,
+        tf.estimator.ModeKeys.EVAL: eval_rcvr_fn,
+        tf.estimator.ModeKeys.PREDICT: serve_rcvr_fn,
+    }
+    exported_path = model.experimental_export_all_saved_models(
+        export_dir_base=base_dir, input_receiver_fn_map=rcvr_fn_map
+    )
+
+    return exported_path.decode("utf-8")
+
+
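A sketch of how the utilities above fit together (the estimator, toy data, and export directory are illustrative assumptions):

import pandas as pd
import tensorflow as tf

df = pd.DataFrame({"userID": [1.0, 2.0, 3.0], "rating": [4.0, 5.0, 3.0]})
feat_cols = [tf.feature_column.numeric_column("userID")]
model = tf.estimator.LinearRegressor(
    feature_columns=feat_cols, optimizer=build_optimizer("adam", lr=0.01)
)

train_fn = pandas_input_fn(df, y_col="rating", num_epochs=10, shuffle=True)
model.train(input_fn=train_fn)

# Writes train/eval/serving SavedModels under ./exports.
export_path = export_model(model, train_fn, train_fn, feat_cols, "./exports")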
+def evaluation_log_hook(
+    estimator,
+    logger,
+    true_df,
+    y_col,
+    eval_df,
+    every_n_iter=10000,
+    model_dir=None,
+    batch_size=256,
+    eval_fns=None,
+    **eval_kwargs
+):
+    """Evaluation log hook for TensorFlow high-level API Estimator.
+
+    Note:
+        A TensorFlow Estimator model uses the last checkpoint weights for evaluation or prediction.
+        In order to get the most up-to-date evaluation results while training,
+        set the model's `save_checkpoints_steps` to be equal to or greater than the hook's `every_n_iter`.
+
+    Args:
+        estimator (tf.estimator.Estimator): Model to evaluate.
+        logger (Logger): Custom logger to log the results.
+            E.g., define a subclass of Logger for AzureML logging.
+        true_df (pd.DataFrame): Ground-truth data.
+        y_col (str): Label column name in true_df.
+        eval_df (pd.DataFrame): Evaluation data without label column.
+        every_n_iter (int): Evaluation frequency (steps).
+        model_dir (str): Model directory to save the summaries to. If None, does not record.
+        batch_size (int): Number of samples fed into the model at a time.
+            Note that the batch size does not affect the evaluation results.
+        eval_fns (iterable of functions): List of evaluation functions that have the signature
+            `(true_df, prediction_df, **eval_kwargs)` -> `float`. If None, the loss is calculated on `true_df`.
+        eval_kwargs: Evaluation function's keyword arguments.
+            Note that the prediction column name should be 'prediction'.
+
+    Returns:
+        tf.train.SessionRunHook: Session run hook to evaluate the model while training.
+    """
+    return _TrainLogHook(
+        estimator,
+        logger,
+        true_df,
+        y_col,
+        eval_df,
+        every_n_iter,
+        model_dir,
+        batch_size,
+        eval_fns,
+        **eval_kwargs
+    )
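A hedged usage sketch, reusing the names from the export example above; rmse below stands in for any metric with the (true_df, prediction_df, **kwargs) signature, and MetricsLogger is defined at the end of this module.

import numpy as np

def rmse(true_df, prediction_df, **kwargs):
    # Illustrative metric; any callable with this signature works.
    err = true_df["rating"].values - prediction_df["prediction"].values
    return float(np.sqrt((err ** 2).mean()))

logger = MetricsLogger()
hook = evaluation_log_hook(
    model,
    logger,
    true_df=df,
    y_col="rating",
    eval_df=df.drop(columns=["rating"]),
    every_n_iter=1000,
    eval_fns=[rmse],
)
model.train(input_fn=train_fn, hooks=[hook])
print(logger.get_log())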
+
+
+class _TrainLogHook(tf.estimator.SessionRunHook):
+    def __init__(
+        self,
+        estimator,
+        logger,
+        true_df,
+        y_col,
+        eval_df,
+        every_n_iter=10000,
+        model_dir=None,
+        batch_size=256,
+        eval_fns=None,
+        **eval_kwargs
+    ):
+        """Evaluation log hook class"""
+        self.model = estimator
+        self.logger = logger
+        self.true_df = true_df
+        self.y_col = y_col
+        self.eval_df = eval_df
+        self.every_n_iter = every_n_iter
+        self.model_dir = model_dir
+        self.batch_size = batch_size
+        self.eval_fns = eval_fns
+        self.eval_kwargs = eval_kwargs
+
+        self.summary_writer = None
+        self.global_step_tensor = None
+        self.step = 0
+
+    def begin(self):
+        if self.model_dir is not None:
+            self.summary_writer = tf.compat.v1.summary.FileWriterCache.get(
+                self.model_dir
+            )
+            self.global_step_tensor = tf.compat.v1.train.get_or_create_global_step()
+        else:
+            self.step = 0
+
+    def before_run(self, run_context):
+        if self.global_step_tensor is not None:
+            requests = {"global_step": self.global_step_tensor}
+            return tf.estimator.SessionRunArgs(requests)
+        else:
+            return None
+
+    def after_run(self, run_context, run_values):
+        if self.global_step_tensor is not None:
+            self.step = run_values.results["global_step"]
+        else:
+            self.step += 1
+
+        if self.step % self.every_n_iter == 0:
+            _prev_log_level = tf.compat.v1.logging.get_verbosity()
+            tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
+
+            if self.eval_fns is None:
+                result = self.model.evaluate(
+                    input_fn=pandas_input_fn(
+                        df=self.true_df, y_col=self.y_col, batch_size=self.batch_size
+                    )
+                )["average_loss"]
+                self._log("validation_loss", result)
+            else:
+                predictions = list(
+                    itertools.islice(
+                        self.model.predict(
+                            input_fn=pandas_input_fn(
+                                df=self.eval_df, batch_size=self.batch_size
+                            )
+                        ),
+                        len(self.eval_df),
+                    )
+                )
+                prediction_df = self.eval_df.copy()
+                prediction_df["prediction"] = [p["predictions"][0] for p in predictions]
+                for fn in self.eval_fns:
+                    result = fn(self.true_df, prediction_df, **self.eval_kwargs)
+                    self._log(fn.__name__, result)
+
+            tf.compat.v1.logging.set_verbosity(_prev_log_level)
+
+    def end(self, session):
+        if self.summary_writer is not None:
+            self.summary_writer.flush()
+
+    def _log(self, tag, value):
+        self.logger.log(tag, value)
+        if self.summary_writer is not None:
+            summary = tf.compat.v1.Summary(
+                value=[tf.compat.v1.Summary.Value(tag=tag, simple_value=value)]
+            )
+            self.summary_writer.add_summary(summary, self.step)
+
+
+class MetricsLogger:
+    """Metrics logger"""
+
+    def __init__(self):
+        """Initializer"""
+        self._log = {}
+
+    def log(self, metric, value):
+        """Log metrics. Each metric's log will be stored in the corresponding list.
+
+        Args:
+            metric (str): Metric name.
+            value (float): Value.
+        """
+        if metric not in self._log:
+            self._log[metric] = []
+        self._log[metric].append(value)
+
+    def get_log(self):
+        """Getter
+
+        Returns:
+            dict: Log metrics.
+        """
+        return self._log
+
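MetricsLogger is a minimal in-memory accumulator keyed by metric name:

logger = MetricsLogger()
logger.log("rmse", 0.98)
logger.log("rmse", 0.95)
print(logger.get_log())  # {'rmse': [0.98, 0.95]}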
+
+
\ No newline at end of file
diff --git a/_modules/recommenders/utils/timer.html b/_modules/recommenders/utils/timer.html
new file mode 100644
index 0000000000..ec358a95c8
--- /dev/null
+++ b/_modules/recommenders/utils/timer.html
@@ -0,0 +1,458 @@
+recommenders.utils.timer — Recommenders documentation
Source code for recommenders.utils.timer

+# Copyright (c) Recommenders contributors.
+# Licensed under the MIT License.
+
+from timeit import default_timer
+
+
+
+class Timer(object):
+    """Timer class.
+
+    `Original code <https://github.com/miguelgfierro/pybase/blob/2298172a13fb4a243754acbc6029a4a2dcf72c20/log_base/timer.py>`_.
+
+    Examples:
+        >>> import time
+        >>> t = Timer()
+        >>> t.start()
+        >>> time.sleep(1)
+        >>> t.stop()
+        >>> t.interval >= 1
+        True
+        >>> with Timer() as t:
+        ...   time.sleep(1)
+        >>> t.interval >= 1
+        True
+        >>> "Time elapsed {}".format(t) #doctest: +ELLIPSIS
+        'Time elapsed 1...'
+    """
+
+    def __init__(self):
+        self._timer = default_timer
+        self._interval = 0
+        self.running = False
+
+    def __enter__(self):
+        self.start()
+        return self
+
+    def __exit__(self, *args):
+        self.stop()
+
+    def __str__(self):
+        return "{:0.4f}".format(self.interval)
+
+    def start(self):
+        """Start the timer."""
+        self.init = self._timer()
+        self.running = True
+
+    def stop(self):
+        """Stop the timer. Calculate the interval in seconds."""
+        self.end = self._timer()
+        try:
+            self._interval = self.end - self.init
+            self.running = False
+        except AttributeError:
+            raise ValueError(
+                "Timer has not been initialized: use start() or the context manager form `with Timer() as t:`"
+            )
+
+    @property
+    def interval(self):
+        """Get time interval in seconds.
+
+        Returns:
+            float: Seconds.
+        """
+        if self.running:
+            raise ValueError("Timer has not been stopped, please use stop().")
+        else:
+            return self._interval
+
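Beyond the doctests above, the typical pattern is the context-manager form (the timed computation is illustrative):

with Timer() as t:
    _ = sum(range(1_000_000))  # any work to be timed
print("Took {} seconds".format(t))  # __str__ formats the interval to 4 decimals
print(t.interval)  # raw float interval in seconds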
+
+
\ No newline at end of file
diff --git a/_sources/TEMP.md b/_sources/TEMP.md
new file mode 100644
index 0000000000..867e2c8492
--- /dev/null
+++ b/_sources/TEMP.md
@@ -0,0 +1 @@
+# Placeholder
\ No newline at end of file
diff --git a/docs/datasets.rst b/_sources/datasets.rst
similarity index 100%
rename from docs/datasets.rst
rename to _sources/datasets.rst
diff --git a/docs/evaluation.rst b/_sources/evaluation.rst
similarity index 100%
rename from docs/evaluation.rst
rename to _sources/evaluation.rst
diff --git a/docs/intro.md b/_sources/intro.md
similarity index 100%
rename from docs/intro.md
rename to _sources/intro.md
diff --git a/docs/models.rst b/_sources/models.rst
similarity index 100%
rename from docs/models.rst
rename to _sources/models.rst
diff --git a/docs/tuning.rst b/_sources/tuning.rst
similarity index 100%
rename from docs/tuning.rst
rename to _sources/tuning.rst
diff --git a/docs/utils.rst b/_sources/utils.rst
similarity index 100%
rename from docs/utils.rst
rename to _sources/utils.rst
diff --git a/_sphinx_design_static/design-style.4045f2051d55cab465a707391d5b2007.min.css b/_sphinx_design_static/design-style.4045f2051d55cab465a707391d5b2007.min.css
new file mode 100644
index 0000000000..3225661c25
--- /dev/null
+++ b/_sphinx_design_static/design-style.4045f2051d55cab465a707391d5b2007.min.css
@@ -0,0 +1 @@
[minified sphinx-design stylesheet: a single very long line of utility-class CSS, omitted]
#fff;--sd-color-black-text: #fff;--sd-color-white-text: #212529;--sd-color-shadow: rgba(0, 0, 0, 0.15);--sd-color-card-border: rgba(0, 0, 0, 0.125);--sd-color-card-border-hover: hsla(231, 99%, 66%, 1);--sd-color-card-background: transparent;--sd-color-card-text: inherit;--sd-color-card-header: transparent;--sd-color-card-footer: transparent;--sd-color-tabs-label-active: hsla(231, 99%, 66%, 1);--sd-color-tabs-label-hover: hsla(231, 99%, 66%, 1);--sd-color-tabs-label-inactive: hsl(0, 0%, 66%);--sd-color-tabs-underline-active: hsla(231, 99%, 66%, 1);--sd-color-tabs-underline-hover: rgba(178, 206, 245, 0.62);--sd-color-tabs-underline-inactive: transparent;--sd-color-tabs-overline: rgb(222, 222, 222);--sd-color-tabs-underline: rgb(222, 222, 222);--sd-fontsize-tabs-label: 1rem} diff --git a/_sphinx_design_static/design-tabs.js b/_sphinx_design_static/design-tabs.js new file mode 100644 index 0000000000..36b38cf0d9 --- /dev/null +++ b/_sphinx_design_static/design-tabs.js @@ -0,0 +1,27 @@ +var sd_labels_by_text = {}; + +function ready() { + const li = document.getElementsByClassName("sd-tab-label"); + for (const label of li) { + syncId = label.getAttribute("data-sync-id"); + if (syncId) { + label.onclick = onLabelClick; + if (!sd_labels_by_text[syncId]) { + sd_labels_by_text[syncId] = []; + } + sd_labels_by_text[syncId].push(label); + } + } +} + +function onLabelClick() { + // Activate other inputs with the same sync id. + syncId = this.getAttribute("data-sync-id"); + for (label of sd_labels_by_text[syncId]) { + if (label === this) continue; + label.previousElementSibling.checked = true; + } + window.localStorage.setItem("sphinx-design-last-tab", syncId); +} + +document.addEventListener("DOMContentLoaded", ready, false); diff --git a/_static/_sphinx_javascript_frameworks_compat.js b/_static/_sphinx_javascript_frameworks_compat.js new file mode 100644 index 0000000000..8549469dc2 --- /dev/null +++ b/_static/_sphinx_javascript_frameworks_compat.js @@ -0,0 +1,134 @@ +/* + * _sphinx_javascript_frameworks_compat.js + * ~~~~~~~~~~ + * + * Compatability shim for jQuery and underscores.js. + * + * WILL BE REMOVED IN Sphinx 6.0 + * xref RemovedInSphinx60Warning + * + */ + +/** + * select a different prefix for underscore + */ +$u = _.noConflict(); + + +/** + * small helper function to urldecode strings + * + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL + */ +jQuery.urldecode = function(x) { + if (!x) { + return x + } + return decodeURIComponent(x.replace(/\+/g, ' ')); +}; + +/** + * small helper function to urlencode strings + */ +jQuery.urlencode = encodeURIComponent; + +/** + * This function returns the parsed url parameters of the + * current request. Multiple values per key are supported, + * it will always return arrays of strings for the value parts. + */ +jQuery.getQueryParameters = function(s) { + if (typeof s === 'undefined') + s = document.location.search; + var parts = s.substr(s.indexOf('?') + 1).split('&'); + var result = {}; + for (var i = 0; i < parts.length; i++) { + var tmp = parts[i].split('=', 2); + var key = jQuery.urldecode(tmp[0]); + var value = jQuery.urldecode(tmp[1]); + if (key in result) + result[key].push(value); + else + result[key] = [value]; + } + return result; +}; + +/** + * highlight a given string on a jquery object by wrapping it in + * span elements with the given class name. 
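+ *
+ * (hedged illustration, an assumed usage rather than upstream text:
+ *   jQuery("div.body").highlightText("term", "highlighted")
+ * wraps each match of "term" in a <span class="highlighted">; when the
+ * match sits inside an SVG it uses a <tspan> plus a backing <rect> sized
+ * to the parent element's bounding box instead.)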
+ */ +jQuery.fn.highlightText = function(text, className) { + function highlight(node, addItems) { + if (node.nodeType === 3) { + var val = node.nodeValue; + var pos = val.toLowerCase().indexOf(text); + if (pos >= 0 && + !jQuery(node.parentNode).hasClass(className) && + !jQuery(node.parentNode).hasClass("nohighlight")) { + var span; + var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.className = className; + } + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + node.parentNode.insertBefore(span, node.parentNode.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling)); + node.nodeValue = val.substr(0, pos); + if (isInSVG) { + var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); + var bbox = node.parentElement.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute('class', className); + addItems.push({ + "parent": node.parentNode, + "target": rect}); + } + } + } + else if (!jQuery(node).is("button, select, textarea")) { + jQuery.each(node.childNodes, function() { + highlight(this, addItems); + }); + } + } + var addItems = []; + var result = this.each(function() { + highlight(this, addItems); + }); + for (var i = 0; i < addItems.length; ++i) { + jQuery(addItems[i].parent).before(addItems[i].target); + } + return result; +}; + +/* + * backward compatibility for jQuery.browser + * This will be supported until firefox bug is fixed. + */ +if (!jQuery.browser) { + jQuery.uaMatch = function(ua) { + ua = ua.toLowerCase(); + + var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || + /(webkit)[ \/]([\w.]+)/.exec(ua) || + /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || + /(msie) ([\w.]+)/.exec(ua) || + ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || + []; + + return { + browser: match[ 1 ] || "", + version: match[ 2 ] || "0" + }; + }; + jQuery.browser = {}; + jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; +} diff --git a/_static/basic.css b/_static/basic.css new file mode 100644 index 0000000000..9e364ed34f --- /dev/null +++ b/_static/basic.css @@ -0,0 +1,930 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. 
+ * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 270px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li p.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + 
+div.body { + min-width: 360px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} +nav.contents, +aside.topic, + +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ +nav.contents, +aside.topic, + +div.topic { + border: 1px solid #ccc; + padding: 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, + +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, + +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + 
border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} + +/* Docutils 0.17 and older (footnotes & citations) */ +dl.footnote > dt, +dl.citation > dt { + float: left; + margin-right: 0.5em; +} + +dl.footnote > dd, +dl.citation > dd { + margin-bottom: 0em; +} + +dl.footnote > dd:after, +dl.citation > dd:after { + content: ""; + clear: both; +} + +/* Docutils 0.18+ (footnotes & citations) */ +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} 
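+/* (hedged note, not upstream: the floated label <span> above hangs the
+   footnote/citation number to the left, and the 2em/4em left margins on
+   the sibling <p> rules keep the paragraph text clear of that float.) */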
+aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +/* Footnotes & citations ends */ + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + +dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dt:after { + content: ":"; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + +div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + margin: 1em 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + 
background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/_static/check-solid.svg b/_static/check-solid.svg new file mode 100644 index 0000000000..92fad4b5c0 --- /dev/null +++ b/_static/check-solid.svg @@ -0,0 +1,4 @@ + + + + diff --git a/_static/clipboard.min.js b/_static/clipboard.min.js new file mode 100644 index 0000000000..54b3c46381 --- /dev/null +++ b/_static/clipboard.min.js @@ -0,0 +1,7 @@ +/*! + * clipboard.js v2.0.8 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */ +!function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.ClipboardJS=e():t.ClipboardJS=e()}(this,function(){return n={686:function(t,e,n){"use strict";n.d(e,{default:function(){return o}});var e=n(279),i=n.n(e),e=n(370),u=n.n(e),e=n(817),c=n.n(e);function a(t){try{return document.execCommand(t)}catch(t){return}}var f=function(t){t=c()(t);return a("cut"),t};var l=function(t){var e,n,o,r=1 + + + + diff --git a/_static/copybutton.css b/_static/copybutton.css new file mode 100644 index 0000000000..f1916ec7d1 --- /dev/null +++ b/_static/copybutton.css @@ -0,0 +1,94 @@ +/* Copy buttons */ +button.copybtn { + position: absolute; + display: flex; + top: .3em; + right: .3em; + width: 1.7em; + height: 1.7em; + opacity: 0; + transition: opacity 0.3s, border .3s, background-color .3s; + user-select: none; + padding: 0; + border: none; + outline: none; + border-radius: 0.4em; + /* The colors that GitHub uses */ + border: #1b1f2426 1px solid; + background-color: #f6f8fa; + color: #57606a; +} + +button.copybtn.success { + border-color: #22863a; + color: #22863a; +} + +button.copybtn svg { + stroke: currentColor; + width: 1.5em; + height: 1.5em; + padding: 0.1em; +} + +div.highlight { + position: relative; +} + +/* Show the copybutton */ +.highlight:hover button.copybtn, button.copybtn.success { + opacity: 1; +} + +.highlight button.copybtn:hover { + background-color: rgb(235, 235, 235); +} + +.highlight button.copybtn:active { + background-color: rgb(187, 187, 187); +} + +/** + * A minimal CSS-only tooltip copied from: + * https://codepen.io/mildrenben/pen/rVBrpK + * + * To use, write HTML like the following: + * + *
<p class="o-tooltip--left" data-tooltip="Hey">Short</p>
+ */ + .o-tooltip--left { + position: relative; + } + + .o-tooltip--left:after { + opacity: 0; + visibility: hidden; + position: absolute; + content: attr(data-tooltip); + padding: .2em; + font-size: .8em; + left: -.2em; + background: grey; + color: white; + white-space: nowrap; + z-index: 2; + border-radius: 2px; + transform: translateX(-102%) translateY(0); + transition: opacity 0.2s cubic-bezier(0.64, 0.09, 0.08, 1), transform 0.2s cubic-bezier(0.64, 0.09, 0.08, 1); +} + +.o-tooltip--left:hover:after { + display: block; + opacity: 1; + visibility: visible; + transform: translateX(-100%) translateY(0); + transition: opacity 0.2s cubic-bezier(0.64, 0.09, 0.08, 1), transform 0.2s cubic-bezier(0.64, 0.09, 0.08, 1); + transition-delay: .5s; +} + +/* By default the copy button shouldn't show up when printing a page */ +@media print { + button.copybtn { + display: none; + } +} diff --git a/_static/copybutton.js b/_static/copybutton.js new file mode 100644 index 0000000000..2ea7ff3e21 --- /dev/null +++ b/_static/copybutton.js @@ -0,0 +1,248 @@ +// Localization support +const messages = { + 'en': { + 'copy': 'Copy', + 'copy_to_clipboard': 'Copy to clipboard', + 'copy_success': 'Copied!', + 'copy_failure': 'Failed to copy', + }, + 'es' : { + 'copy': 'Copiar', + 'copy_to_clipboard': 'Copiar al portapapeles', + 'copy_success': '¡Copiado!', + 'copy_failure': 'Error al copiar', + }, + 'de' : { + 'copy': 'Kopieren', + 'copy_to_clipboard': 'In die Zwischenablage kopieren', + 'copy_success': 'Kopiert!', + 'copy_failure': 'Fehler beim Kopieren', + }, + 'fr' : { + 'copy': 'Copier', + 'copy_to_clipboard': 'Copier dans le presse-papier', + 'copy_success': 'Copié !', + 'copy_failure': 'Échec de la copie', + }, + 'ru': { + 'copy': 'Скопировать', + 'copy_to_clipboard': 'Скопировать в буфер', + 'copy_success': 'Скопировано!', + 'copy_failure': 'Не удалось скопировать', + }, + 'zh-CN': { + 'copy': '复制', + 'copy_to_clipboard': '复制到剪贴板', + 'copy_success': '复制成功!', + 'copy_failure': '复制失败', + }, + 'it' : { + 'copy': 'Copiare', + 'copy_to_clipboard': 'Copiato negli appunti', + 'copy_success': 'Copiato!', + 'copy_failure': 'Errore durante la copia', + } +} + +let locale = 'en' +if( document.documentElement.lang !== undefined + && messages[document.documentElement.lang] !== undefined ) { + locale = document.documentElement.lang +} + +let doc_url_root = DOCUMENTATION_OPTIONS.URL_ROOT; +if (doc_url_root == '#') { + doc_url_root = ''; +} + +/** + * SVG files for our copy buttons + */ +let iconCheck = ` + ${messages[locale]['copy_success']} + + +` + +// If the user specified their own SVG use that, otherwise use the default +let iconCopy = ``; +if (!iconCopy) { + iconCopy = ` + ${messages[locale]['copy_to_clipboard']} + + + +` +} + +/** + * Set up copy/paste for code blocks + */ + +const runWhenDOMLoaded = cb => { + if (document.readyState != 'loading') { + cb() + } else if (document.addEventListener) { + document.addEventListener('DOMContentLoaded', cb) + } else { + document.attachEvent('onreadystatechange', function() { + if (document.readyState == 'complete') cb() + }) + } +} + +const codeCellId = index => `codecell${index}` + +// Clears selected text since ClipboardJS will select the text when copying +const clearSelection = () => { + if (window.getSelection) { + window.getSelection().removeAllRanges() + } else if (document.selection) { + document.selection.empty() + } +} + +// Changes tooltip text for a moment, then changes it back +// We want the timeout of our `success` class to be a bit shorter than the +// 
tooltip and icon change, so that we can hide the icon before changing back. +var timeoutIcon = 2000; +var timeoutSuccessClass = 1500; + +const temporarilyChangeTooltip = (el, oldText, newText) => { + el.setAttribute('data-tooltip', newText) + el.classList.add('success') + // Remove success a little bit sooner than we change the tooltip + // So that we can use CSS to hide the copybutton first + setTimeout(() => el.classList.remove('success'), timeoutSuccessClass) + setTimeout(() => el.setAttribute('data-tooltip', oldText), timeoutIcon) +} + +// Changes the copy button icon for two seconds, then changes it back +const temporarilyChangeIcon = (el) => { + el.innerHTML = iconCheck; + setTimeout(() => {el.innerHTML = iconCopy}, timeoutIcon) +} + +const addCopyButtonToCodeCells = () => { + // If ClipboardJS hasn't loaded, wait a bit and try again. This + // happens because we load ClipboardJS asynchronously. + if (window.ClipboardJS === undefined) { + setTimeout(addCopyButtonToCodeCells, 250) + return + } + + // Add copybuttons to all of our code cells + const COPYBUTTON_SELECTOR = 'div.highlight pre'; + const codeCells = document.querySelectorAll(COPYBUTTON_SELECTOR) + codeCells.forEach((codeCell, index) => { + const id = codeCellId(index) + codeCell.setAttribute('id', id) + + const clipboardButton = id => + `` + codeCell.insertAdjacentHTML('afterend', clipboardButton(id)) + }) + +function escapeRegExp(string) { + return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string +} + +/** + * Removes excluded text from a Node. + * + * @param {Node} target Node to filter. + * @param {string} exclude CSS selector of nodes to exclude. + * @returns {DOMString} Text from `target` with text removed. + */ +function filterText(target, exclude) { + const clone = target.cloneNode(true); // clone as to not modify the live DOM + if (exclude) { + // remove excluded nodes + clone.querySelectorAll(exclude).forEach(node => node.remove()); + } + return clone.innerText; +} + +// Callback when a copy button is clicked. Will be passed the node that was clicked +// should then grab the text and replace pieces of text that shouldn't be used in output +function formatCopyText(textContent, copybuttonPromptText, isRegexp = false, onlyCopyPromptLines = true, removePrompts = true, copyEmptyLines = true, lineContinuationChar = "", hereDocDelim = "") { + var regexp; + var match; + + // Do we check for line continuation characters and "HERE-documents"? 
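+ // (hedged illustration, not part of the upstream file; with the defaults
+ // onlyCopyPromptLines=true and removePrompts=true:
+ //   formatCopyText(">>> print(1)\n1", ">>> ")   // -> "print(1)"
+ // the prompt line is kept with ">>> " stripped, while the bare output
+ // line "1" is dropped because it has no prompt and is not empty.)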
+ var useLineCont = !!lineContinuationChar + var useHereDoc = !!hereDocDelim + + // create regexp to capture prompt and remaining line + if (isRegexp) { + regexp = new RegExp('^(' + copybuttonPromptText + ')(.*)') + } else { + regexp = new RegExp('^(' + escapeRegExp(copybuttonPromptText) + ')(.*)') + } + + const outputLines = []; + var promptFound = false; + var gotLineCont = false; + var gotHereDoc = false; + const lineGotPrompt = []; + for (const line of textContent.split('\n')) { + match = line.match(regexp) + if (match || gotLineCont || gotHereDoc) { + promptFound = regexp.test(line) + lineGotPrompt.push(promptFound) + if (removePrompts && promptFound) { + outputLines.push(match[2]) + } else { + outputLines.push(line) + } + gotLineCont = line.endsWith(lineContinuationChar) & useLineCont + if (line.includes(hereDocDelim) & useHereDoc) + gotHereDoc = !gotHereDoc + } else if (!onlyCopyPromptLines) { + outputLines.push(line) + } else if (copyEmptyLines && line.trim() === '') { + outputLines.push(line) + } + } + + // If no lines with the prompt were found then just use original lines + if (lineGotPrompt.some(v => v === true)) { + textContent = outputLines.join('\n'); + } + + // Remove a trailing newline to avoid auto-running when pasting + if (textContent.endsWith("\n")) { + textContent = textContent.slice(0, -1) + } + return textContent +} + + +var copyTargetText = (trigger) => { + var target = document.querySelector(trigger.attributes['data-clipboard-target'].value); + + // get filtered text + let exclude = '.linenos'; + + let text = filterText(target, exclude); + return formatCopyText(text, '', false, true, true, true, '', '') +} + + // Initialize with a callback so we can modify the text before copy + const clipboard = new ClipboardJS('.copybtn', {text: copyTargetText}) + + // Update UI with error/success messages + clipboard.on('success', event => { + clearSelection() + temporarilyChangeTooltip(event.trigger, messages[locale]['copy'], messages[locale]['copy_success']) + temporarilyChangeIcon(event.trigger) + }) + + clipboard.on('error', event => { + temporarilyChangeTooltip(event.trigger, messages[locale]['copy'], messages[locale]['copy_failure']) + }) +} + +runWhenDOMLoaded(addCopyButtonToCodeCells) \ No newline at end of file diff --git a/_static/copybutton_funcs.js b/_static/copybutton_funcs.js new file mode 100644 index 0000000000..dbe1aaad79 --- /dev/null +++ b/_static/copybutton_funcs.js @@ -0,0 +1,73 @@ +function escapeRegExp(string) { + return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string +} + +/** + * Removes excluded text from a Node. + * + * @param {Node} target Node to filter. + * @param {string} exclude CSS selector of nodes to exclude. + * @returns {DOMString} Text from `target` with text removed. + */ +export function filterText(target, exclude) { + const clone = target.cloneNode(true); // clone as to not modify the live DOM + if (exclude) { + // remove excluded nodes + clone.querySelectorAll(exclude).forEach(node => node.remove()); + } + return clone.innerText; +} + +// Callback when a copy button is clicked. 
Will be passed the node that was clicked +// should then grab the text and replace pieces of text that shouldn't be used in output +export function formatCopyText(textContent, copybuttonPromptText, isRegexp = false, onlyCopyPromptLines = true, removePrompts = true, copyEmptyLines = true, lineContinuationChar = "", hereDocDelim = "") { + var regexp; + var match; + + // Do we check for line continuation characters and "HERE-documents"? + var useLineCont = !!lineContinuationChar + var useHereDoc = !!hereDocDelim + + // create regexp to capture prompt and remaining line + if (isRegexp) { + regexp = new RegExp('^(' + copybuttonPromptText + ')(.*)') + } else { + regexp = new RegExp('^(' + escapeRegExp(copybuttonPromptText) + ')(.*)') + } + + const outputLines = []; + var promptFound = false; + var gotLineCont = false; + var gotHereDoc = false; + const lineGotPrompt = []; + for (const line of textContent.split('\n')) { + match = line.match(regexp) + if (match || gotLineCont || gotHereDoc) { + promptFound = regexp.test(line) + lineGotPrompt.push(promptFound) + if (removePrompts && promptFound) { + outputLines.push(match[2]) + } else { + outputLines.push(line) + } + gotLineCont = line.endsWith(lineContinuationChar) & useLineCont + if (line.includes(hereDocDelim) & useHereDoc) + gotHereDoc = !gotHereDoc + } else if (!onlyCopyPromptLines) { + outputLines.push(line) + } else if (copyEmptyLines && line.trim() === '') { + outputLines.push(line) + } + } + + // If no lines with the prompt were found then just use original lines + if (lineGotPrompt.some(v => v === true)) { + textContent = outputLines.join('\n'); + } + + // Remove a trailing newline to avoid auto-running when pasting + if (textContent.endsWith("\n")) { + textContent = textContent.slice(0, -1) + } + return textContent +} diff --git a/_static/design-style.4045f2051d55cab465a707391d5b2007.min.css b/_static/design-style.4045f2051d55cab465a707391d5b2007.min.css new file mode 100644 index 0000000000..3225661c25 --- /dev/null +++ b/_static/design-style.4045f2051d55cab465a707391d5b2007.min.css @@ -0,0 +1 @@ +.sd-bg-primary{background-color:var(--sd-color-primary) !important}.sd-bg-text-primary{color:var(--sd-color-primary-text) !important}button.sd-bg-primary:focus,button.sd-bg-primary:hover{background-color:var(--sd-color-primary-highlight) !important}a.sd-bg-primary:focus,a.sd-bg-primary:hover{background-color:var(--sd-color-primary-highlight) !important}.sd-bg-secondary{background-color:var(--sd-color-secondary) !important}.sd-bg-text-secondary{color:var(--sd-color-secondary-text) !important}button.sd-bg-secondary:focus,button.sd-bg-secondary:hover{background-color:var(--sd-color-secondary-highlight) !important}a.sd-bg-secondary:focus,a.sd-bg-secondary:hover{background-color:var(--sd-color-secondary-highlight) !important}.sd-bg-success{background-color:var(--sd-color-success) !important}.sd-bg-text-success{color:var(--sd-color-success-text) !important}button.sd-bg-success:focus,button.sd-bg-success:hover{background-color:var(--sd-color-success-highlight) !important}a.sd-bg-success:focus,a.sd-bg-success:hover{background-color:var(--sd-color-success-highlight) !important}.sd-bg-info{background-color:var(--sd-color-info) !important}.sd-bg-text-info{color:var(--sd-color-info-text) !important}button.sd-bg-info:focus,button.sd-bg-info:hover{background-color:var(--sd-color-info-highlight) !important}a.sd-bg-info:focus,a.sd-bg-info:hover{background-color:var(--sd-color-info-highlight) !important}.sd-bg-warning{background-color:var(--sd-color-warning) 
!important}.sd-bg-text-warning{color:var(--sd-color-warning-text) !important}button.sd-bg-warning:focus,button.sd-bg-warning:hover{background-color:var(--sd-color-warning-highlight) !important}a.sd-bg-warning:focus,a.sd-bg-warning:hover{background-color:var(--sd-color-warning-highlight) !important}.sd-bg-danger{background-color:var(--sd-color-danger) !important}.sd-bg-text-danger{color:var(--sd-color-danger-text) !important}button.sd-bg-danger:focus,button.sd-bg-danger:hover{background-color:var(--sd-color-danger-highlight) !important}a.sd-bg-danger:focus,a.sd-bg-danger:hover{background-color:var(--sd-color-danger-highlight) !important}.sd-bg-light{background-color:var(--sd-color-light) !important}.sd-bg-text-light{color:var(--sd-color-light-text) !important}button.sd-bg-light:focus,button.sd-bg-light:hover{background-color:var(--sd-color-light-highlight) !important}a.sd-bg-light:focus,a.sd-bg-light:hover{background-color:var(--sd-color-light-highlight) !important}.sd-bg-muted{background-color:var(--sd-color-muted) !important}.sd-bg-text-muted{color:var(--sd-color-muted-text) !important}button.sd-bg-muted:focus,button.sd-bg-muted:hover{background-color:var(--sd-color-muted-highlight) !important}a.sd-bg-muted:focus,a.sd-bg-muted:hover{background-color:var(--sd-color-muted-highlight) !important}.sd-bg-dark{background-color:var(--sd-color-dark) !important}.sd-bg-text-dark{color:var(--sd-color-dark-text) !important}button.sd-bg-dark:focus,button.sd-bg-dark:hover{background-color:var(--sd-color-dark-highlight) !important}a.sd-bg-dark:focus,a.sd-bg-dark:hover{background-color:var(--sd-color-dark-highlight) !important}.sd-bg-black{background-color:var(--sd-color-black) !important}.sd-bg-text-black{color:var(--sd-color-black-text) !important}button.sd-bg-black:focus,button.sd-bg-black:hover{background-color:var(--sd-color-black-highlight) !important}a.sd-bg-black:focus,a.sd-bg-black:hover{background-color:var(--sd-color-black-highlight) !important}.sd-bg-white{background-color:var(--sd-color-white) !important}.sd-bg-text-white{color:var(--sd-color-white-text) !important}button.sd-bg-white:focus,button.sd-bg-white:hover{background-color:var(--sd-color-white-highlight) !important}a.sd-bg-white:focus,a.sd-bg-white:hover{background-color:var(--sd-color-white-highlight) !important}.sd-text-primary,.sd-text-primary>p{color:var(--sd-color-primary) !important}a.sd-text-primary:focus,a.sd-text-primary:hover{color:var(--sd-color-primary-highlight) !important}.sd-text-secondary,.sd-text-secondary>p{color:var(--sd-color-secondary) !important}a.sd-text-secondary:focus,a.sd-text-secondary:hover{color:var(--sd-color-secondary-highlight) !important}.sd-text-success,.sd-text-success>p{color:var(--sd-color-success) !important}a.sd-text-success:focus,a.sd-text-success:hover{color:var(--sd-color-success-highlight) !important}.sd-text-info,.sd-text-info>p{color:var(--sd-color-info) !important}a.sd-text-info:focus,a.sd-text-info:hover{color:var(--sd-color-info-highlight) !important}.sd-text-warning,.sd-text-warning>p{color:var(--sd-color-warning) !important}a.sd-text-warning:focus,a.sd-text-warning:hover{color:var(--sd-color-warning-highlight) !important}.sd-text-danger,.sd-text-danger>p{color:var(--sd-color-danger) !important}a.sd-text-danger:focus,a.sd-text-danger:hover{color:var(--sd-color-danger-highlight) !important}.sd-text-light,.sd-text-light>p{color:var(--sd-color-light) !important}a.sd-text-light:focus,a.sd-text-light:hover{color:var(--sd-color-light-highlight) 
!important}.sd-text-muted,.sd-text-muted>p{color:var(--sd-color-muted) !important}a.sd-text-muted:focus,a.sd-text-muted:hover{color:var(--sd-color-muted-highlight) !important}.sd-text-dark,.sd-text-dark>p{color:var(--sd-color-dark) !important}a.sd-text-dark:focus,a.sd-text-dark:hover{color:var(--sd-color-dark-highlight) !important}.sd-text-black,.sd-text-black>p{color:var(--sd-color-black) !important}a.sd-text-black:focus,a.sd-text-black:hover{color:var(--sd-color-black-highlight) !important}.sd-text-white,.sd-text-white>p{color:var(--sd-color-white) !important}a.sd-text-white:focus,a.sd-text-white:hover{color:var(--sd-color-white-highlight) !important}.sd-outline-primary{border-color:var(--sd-color-primary) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-primary:focus,a.sd-outline-primary:hover{border-color:var(--sd-color-primary-highlight) !important}.sd-outline-secondary{border-color:var(--sd-color-secondary) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-secondary:focus,a.sd-outline-secondary:hover{border-color:var(--sd-color-secondary-highlight) !important}.sd-outline-success{border-color:var(--sd-color-success) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-success:focus,a.sd-outline-success:hover{border-color:var(--sd-color-success-highlight) !important}.sd-outline-info{border-color:var(--sd-color-info) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-info:focus,a.sd-outline-info:hover{border-color:var(--sd-color-info-highlight) !important}.sd-outline-warning{border-color:var(--sd-color-warning) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-warning:focus,a.sd-outline-warning:hover{border-color:var(--sd-color-warning-highlight) !important}.sd-outline-danger{border-color:var(--sd-color-danger) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-danger:focus,a.sd-outline-danger:hover{border-color:var(--sd-color-danger-highlight) !important}.sd-outline-light{border-color:var(--sd-color-light) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-light:focus,a.sd-outline-light:hover{border-color:var(--sd-color-light-highlight) !important}.sd-outline-muted{border-color:var(--sd-color-muted) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-muted:focus,a.sd-outline-muted:hover{border-color:var(--sd-color-muted-highlight) !important}.sd-outline-dark{border-color:var(--sd-color-dark) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-dark:focus,a.sd-outline-dark:hover{border-color:var(--sd-color-dark-highlight) !important}.sd-outline-black{border-color:var(--sd-color-black) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-black:focus,a.sd-outline-black:hover{border-color:var(--sd-color-black-highlight) !important}.sd-outline-white{border-color:var(--sd-color-white) !important;border-style:solid !important;border-width:1px !important}a.sd-outline-white:focus,a.sd-outline-white:hover{border-color:var(--sd-color-white-highlight) !important}.sd-bg-transparent{background-color:transparent !important}.sd-outline-transparent{border-color:transparent !important}.sd-text-transparent{color:transparent !important}.sd-p-0{padding:0 !important}.sd-pt-0,.sd-py-0{padding-top:0 !important}.sd-pr-0,.sd-px-0{padding-right:0 !important}.sd-pb-0,.sd-py-0{padding-bottom:0 
!important}.sd-pl-0,.sd-px-0{padding-left:0 !important}.sd-p-1{padding:.25rem !important}.sd-pt-1,.sd-py-1{padding-top:.25rem !important}.sd-pr-1,.sd-px-1{padding-right:.25rem !important}.sd-pb-1,.sd-py-1{padding-bottom:.25rem !important}.sd-pl-1,.sd-px-1{padding-left:.25rem !important}.sd-p-2{padding:.5rem !important}.sd-pt-2,.sd-py-2{padding-top:.5rem !important}.sd-pr-2,.sd-px-2{padding-right:.5rem !important}.sd-pb-2,.sd-py-2{padding-bottom:.5rem !important}.sd-pl-2,.sd-px-2{padding-left:.5rem !important}.sd-p-3{padding:1rem !important}.sd-pt-3,.sd-py-3{padding-top:1rem !important}.sd-pr-3,.sd-px-3{padding-right:1rem !important}.sd-pb-3,.sd-py-3{padding-bottom:1rem !important}.sd-pl-3,.sd-px-3{padding-left:1rem !important}.sd-p-4{padding:1.5rem !important}.sd-pt-4,.sd-py-4{padding-top:1.5rem !important}.sd-pr-4,.sd-px-4{padding-right:1.5rem !important}.sd-pb-4,.sd-py-4{padding-bottom:1.5rem !important}.sd-pl-4,.sd-px-4{padding-left:1.5rem !important}.sd-p-5{padding:3rem !important}.sd-pt-5,.sd-py-5{padding-top:3rem !important}.sd-pr-5,.sd-px-5{padding-right:3rem !important}.sd-pb-5,.sd-py-5{padding-bottom:3rem !important}.sd-pl-5,.sd-px-5{padding-left:3rem !important}.sd-m-auto{margin:auto !important}.sd-mt-auto,.sd-my-auto{margin-top:auto !important}.sd-mr-auto,.sd-mx-auto{margin-right:auto !important}.sd-mb-auto,.sd-my-auto{margin-bottom:auto !important}.sd-ml-auto,.sd-mx-auto{margin-left:auto !important}.sd-m-0{margin:0 !important}.sd-mt-0,.sd-my-0{margin-top:0 !important}.sd-mr-0,.sd-mx-0{margin-right:0 !important}.sd-mb-0,.sd-my-0{margin-bottom:0 !important}.sd-ml-0,.sd-mx-0{margin-left:0 !important}.sd-m-1{margin:.25rem !important}.sd-mt-1,.sd-my-1{margin-top:.25rem !important}.sd-mr-1,.sd-mx-1{margin-right:.25rem !important}.sd-mb-1,.sd-my-1{margin-bottom:.25rem !important}.sd-ml-1,.sd-mx-1{margin-left:.25rem !important}.sd-m-2{margin:.5rem !important}.sd-mt-2,.sd-my-2{margin-top:.5rem !important}.sd-mr-2,.sd-mx-2{margin-right:.5rem !important}.sd-mb-2,.sd-my-2{margin-bottom:.5rem !important}.sd-ml-2,.sd-mx-2{margin-left:.5rem !important}.sd-m-3{margin:1rem !important}.sd-mt-3,.sd-my-3{margin-top:1rem !important}.sd-mr-3,.sd-mx-3{margin-right:1rem !important}.sd-mb-3,.sd-my-3{margin-bottom:1rem !important}.sd-ml-3,.sd-mx-3{margin-left:1rem !important}.sd-m-4{margin:1.5rem !important}.sd-mt-4,.sd-my-4{margin-top:1.5rem !important}.sd-mr-4,.sd-mx-4{margin-right:1.5rem !important}.sd-mb-4,.sd-my-4{margin-bottom:1.5rem !important}.sd-ml-4,.sd-mx-4{margin-left:1.5rem !important}.sd-m-5{margin:3rem !important}.sd-mt-5,.sd-my-5{margin-top:3rem !important}.sd-mr-5,.sd-mx-5{margin-right:3rem !important}.sd-mb-5,.sd-my-5{margin-bottom:3rem !important}.sd-ml-5,.sd-mx-5{margin-left:3rem !important}.sd-w-25{width:25% !important}.sd-w-50{width:50% !important}.sd-w-75{width:75% !important}.sd-w-100{width:100% !important}.sd-w-auto{width:auto !important}.sd-h-25{height:25% !important}.sd-h-50{height:50% !important}.sd-h-75{height:75% !important}.sd-h-100{height:100% !important}.sd-h-auto{height:auto !important}.sd-d-none{display:none !important}.sd-d-inline{display:inline !important}.sd-d-inline-block{display:inline-block !important}.sd-d-block{display:block !important}.sd-d-grid{display:grid !important}.sd-d-flex-row{display:-ms-flexbox !important;display:flex !important;flex-direction:row !important}.sd-d-flex-column{display:-ms-flexbox !important;display:flex !important;flex-direction:column !important}.sd-d-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex 
!important}@media(min-width: 576px){.sd-d-sm-none{display:none !important}.sd-d-sm-inline{display:inline !important}.sd-d-sm-inline-block{display:inline-block !important}.sd-d-sm-block{display:block !important}.sd-d-sm-grid{display:grid !important}.sd-d-sm-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-sm-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 768px){.sd-d-md-none{display:none !important}.sd-d-md-inline{display:inline !important}.sd-d-md-inline-block{display:inline-block !important}.sd-d-md-block{display:block !important}.sd-d-md-grid{display:grid !important}.sd-d-md-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-md-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 992px){.sd-d-lg-none{display:none !important}.sd-d-lg-inline{display:inline !important}.sd-d-lg-inline-block{display:inline-block !important}.sd-d-lg-block{display:block !important}.sd-d-lg-grid{display:grid !important}.sd-d-lg-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-lg-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}@media(min-width: 1200px){.sd-d-xl-none{display:none !important}.sd-d-xl-inline{display:inline !important}.sd-d-xl-inline-block{display:inline-block !important}.sd-d-xl-block{display:block !important}.sd-d-xl-grid{display:grid !important}.sd-d-xl-flex{display:-ms-flexbox !important;display:flex !important}.sd-d-xl-inline-flex{display:-ms-inline-flexbox !important;display:inline-flex !important}}.sd-align-major-start{justify-content:flex-start !important}.sd-align-major-end{justify-content:flex-end !important}.sd-align-major-center{justify-content:center !important}.sd-align-major-justify{justify-content:space-between !important}.sd-align-major-spaced{justify-content:space-evenly !important}.sd-align-minor-start{align-items:flex-start !important}.sd-align-minor-end{align-items:flex-end !important}.sd-align-minor-center{align-items:center !important}.sd-align-minor-stretch{align-items:stretch !important}.sd-text-justify{text-align:justify !important}.sd-text-left{text-align:left !important}.sd-text-right{text-align:right !important}.sd-text-center{text-align:center !important}.sd-font-weight-light{font-weight:300 !important}.sd-font-weight-lighter{font-weight:lighter !important}.sd-font-weight-normal{font-weight:400 !important}.sd-font-weight-bold{font-weight:700 !important}.sd-font-weight-bolder{font-weight:bolder !important}.sd-font-italic{font-style:italic !important}.sd-text-decoration-none{text-decoration:none !important}.sd-text-lowercase{text-transform:lowercase !important}.sd-text-uppercase{text-transform:uppercase !important}.sd-text-capitalize{text-transform:capitalize !important}.sd-text-wrap{white-space:normal !important}.sd-text-nowrap{white-space:nowrap !important}.sd-text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.sd-fs-1,.sd-fs-1>p{font-size:calc(1.375rem + 1.5vw) !important;line-height:unset !important}.sd-fs-2,.sd-fs-2>p{font-size:calc(1.325rem + 0.9vw) !important;line-height:unset !important}.sd-fs-3,.sd-fs-3>p{font-size:calc(1.3rem + 0.6vw) !important;line-height:unset !important}.sd-fs-4,.sd-fs-4>p{font-size:calc(1.275rem + 0.3vw) !important;line-height:unset !important}.sd-fs-5,.sd-fs-5>p{font-size:1.25rem !important;line-height:unset !important}.sd-fs-6,.sd-fs-6>p{font-size:1rem !important;line-height:unset !important}.sd-border-0{border:0 solid 
!important}.sd-border-top-0{border-top:0 solid !important}.sd-border-bottom-0{border-bottom:0 solid !important}.sd-border-right-0{border-right:0 solid !important}.sd-border-left-0{border-left:0 solid !important}.sd-border-1{border:1px solid !important}.sd-border-top-1{border-top:1px solid !important}.sd-border-bottom-1{border-bottom:1px solid !important}.sd-border-right-1{border-right:1px solid !important}.sd-border-left-1{border-left:1px solid !important}.sd-border-2{border:2px solid !important}.sd-border-top-2{border-top:2px solid !important}.sd-border-bottom-2{border-bottom:2px solid !important}.sd-border-right-2{border-right:2px solid !important}.sd-border-left-2{border-left:2px solid !important}.sd-border-3{border:3px solid !important}.sd-border-top-3{border-top:3px solid !important}.sd-border-bottom-3{border-bottom:3px solid !important}.sd-border-right-3{border-right:3px solid !important}.sd-border-left-3{border-left:3px solid !important}.sd-border-4{border:4px solid !important}.sd-border-top-4{border-top:4px solid !important}.sd-border-bottom-4{border-bottom:4px solid !important}.sd-border-right-4{border-right:4px solid !important}.sd-border-left-4{border-left:4px solid !important}.sd-border-5{border:5px solid !important}.sd-border-top-5{border-top:5px solid !important}.sd-border-bottom-5{border-bottom:5px solid !important}.sd-border-right-5{border-right:5px solid !important}.sd-border-left-5{border-left:5px solid !important}.sd-rounded-0{border-radius:0 !important}.sd-rounded-1{border-radius:.2rem !important}.sd-rounded-2{border-radius:.3rem !important}.sd-rounded-3{border-radius:.5rem !important}.sd-rounded-pill{border-radius:50rem !important}.sd-rounded-circle{border-radius:50% !important}.shadow-none{box-shadow:none !important}.sd-shadow-sm{box-shadow:0 .125rem .25rem var(--sd-color-shadow) !important}.sd-shadow-md{box-shadow:0 .5rem 1rem var(--sd-color-shadow) !important}.sd-shadow-lg{box-shadow:0 1rem 3rem var(--sd-color-shadow) !important}@keyframes sd-slide-from-left{0%{transform:translateX(-100%)}100%{transform:translateX(0)}}@keyframes sd-slide-from-right{0%{transform:translateX(200%)}100%{transform:translateX(0)}}@keyframes sd-grow100{0%{transform:scale(0);opacity:.5}100%{transform:scale(1);opacity:1}}@keyframes sd-grow50{0%{transform:scale(0.5);opacity:.5}100%{transform:scale(1);opacity:1}}@keyframes sd-grow50-rot20{0%{transform:scale(0.5) rotateZ(-20deg);opacity:.5}75%{transform:scale(1) rotateZ(5deg);opacity:1}95%{transform:scale(1) rotateZ(-1deg);opacity:1}100%{transform:scale(1) rotateZ(0);opacity:1}}.sd-animate-slide-from-left{animation:1s ease-out 0s 1 normal none running sd-slide-from-left}.sd-animate-slide-from-right{animation:1s ease-out 0s 1 normal none running sd-slide-from-right}.sd-animate-grow100{animation:1s ease-out 0s 1 normal none running sd-grow100}.sd-animate-grow50{animation:1s ease-out 0s 1 normal none running sd-grow50}.sd-animate-grow50-rot20{animation:1s ease-out 0s 1 normal none running sd-grow50-rot20}.sd-badge{display:inline-block;padding:.35em .65em;font-size:.75em;font-weight:700;line-height:1;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25rem}.sd-badge:empty{display:none}a.sd-badge{text-decoration:none}.sd-btn .sd-badge{position:relative;top:-1px}.sd-btn{background-color:transparent;border:1px solid transparent;border-radius:.25rem;cursor:pointer;display:inline-block;font-weight:400;font-size:1rem;line-height:1.5;padding:.375rem .75rem;text-align:center;text-decoration:none;transition:color .15s 
ease-in-out,background-color .15s ease-in-out,border-color .15s ease-in-out,box-shadow .15s ease-in-out;vertical-align:middle;user-select:none;-moz-user-select:none;-ms-user-select:none;-webkit-user-select:none}.sd-btn:hover{text-decoration:none}@media(prefers-reduced-motion: reduce){.sd-btn{transition:none}}.sd-btn-primary,.sd-btn-outline-primary:hover,.sd-btn-outline-primary:focus{color:var(--sd-color-primary-text) !important;background-color:var(--sd-color-primary) !important;border-color:var(--sd-color-primary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-primary:hover,.sd-btn-primary:focus{color:var(--sd-color-primary-text) !important;background-color:var(--sd-color-primary-highlight) !important;border-color:var(--sd-color-primary-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-primary{color:var(--sd-color-primary) !important;border-color:var(--sd-color-primary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-secondary,.sd-btn-outline-secondary:hover,.sd-btn-outline-secondary:focus{color:var(--sd-color-secondary-text) !important;background-color:var(--sd-color-secondary) !important;border-color:var(--sd-color-secondary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-secondary:hover,.sd-btn-secondary:focus{color:var(--sd-color-secondary-text) !important;background-color:var(--sd-color-secondary-highlight) !important;border-color:var(--sd-color-secondary-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-secondary{color:var(--sd-color-secondary) !important;border-color:var(--sd-color-secondary) !important;border-width:1px !important;border-style:solid !important}.sd-btn-success,.sd-btn-outline-success:hover,.sd-btn-outline-success:focus{color:var(--sd-color-success-text) !important;background-color:var(--sd-color-success) !important;border-color:var(--sd-color-success) !important;border-width:1px !important;border-style:solid !important}.sd-btn-success:hover,.sd-btn-success:focus{color:var(--sd-color-success-text) !important;background-color:var(--sd-color-success-highlight) !important;border-color:var(--sd-color-success-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-success{color:var(--sd-color-success) !important;border-color:var(--sd-color-success) !important;border-width:1px !important;border-style:solid !important}.sd-btn-info,.sd-btn-outline-info:hover,.sd-btn-outline-info:focus{color:var(--sd-color-info-text) !important;background-color:var(--sd-color-info) !important;border-color:var(--sd-color-info) !important;border-width:1px !important;border-style:solid !important}.sd-btn-info:hover,.sd-btn-info:focus{color:var(--sd-color-info-text) !important;background-color:var(--sd-color-info-highlight) !important;border-color:var(--sd-color-info-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-info{color:var(--sd-color-info) !important;border-color:var(--sd-color-info) !important;border-width:1px !important;border-style:solid !important}.sd-btn-warning,.sd-btn-outline-warning:hover,.sd-btn-outline-warning:focus{color:var(--sd-color-warning-text) !important;background-color:var(--sd-color-warning) !important;border-color:var(--sd-color-warning) !important;border-width:1px !important;border-style:solid !important}.sd-btn-warning:hover,.sd-btn-warning:focus{color:var(--sd-color-warning-text) 
!important;background-color:var(--sd-color-warning-highlight) !important;border-color:var(--sd-color-warning-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-warning{color:var(--sd-color-warning) !important;border-color:var(--sd-color-warning) !important;border-width:1px !important;border-style:solid !important}.sd-btn-danger,.sd-btn-outline-danger:hover,.sd-btn-outline-danger:focus{color:var(--sd-color-danger-text) !important;background-color:var(--sd-color-danger) !important;border-color:var(--sd-color-danger) !important;border-width:1px !important;border-style:solid !important}.sd-btn-danger:hover,.sd-btn-danger:focus{color:var(--sd-color-danger-text) !important;background-color:var(--sd-color-danger-highlight) !important;border-color:var(--sd-color-danger-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-danger{color:var(--sd-color-danger) !important;border-color:var(--sd-color-danger) !important;border-width:1px !important;border-style:solid !important}.sd-btn-light,.sd-btn-outline-light:hover,.sd-btn-outline-light:focus{color:var(--sd-color-light-text) !important;background-color:var(--sd-color-light) !important;border-color:var(--sd-color-light) !important;border-width:1px !important;border-style:solid !important}.sd-btn-light:hover,.sd-btn-light:focus{color:var(--sd-color-light-text) !important;background-color:var(--sd-color-light-highlight) !important;border-color:var(--sd-color-light-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-light{color:var(--sd-color-light) !important;border-color:var(--sd-color-light) !important;border-width:1px !important;border-style:solid !important}.sd-btn-muted,.sd-btn-outline-muted:hover,.sd-btn-outline-muted:focus{color:var(--sd-color-muted-text) !important;background-color:var(--sd-color-muted) !important;border-color:var(--sd-color-muted) !important;border-width:1px !important;border-style:solid !important}.sd-btn-muted:hover,.sd-btn-muted:focus{color:var(--sd-color-muted-text) !important;background-color:var(--sd-color-muted-highlight) !important;border-color:var(--sd-color-muted-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-muted{color:var(--sd-color-muted) !important;border-color:var(--sd-color-muted) !important;border-width:1px !important;border-style:solid !important}.sd-btn-dark,.sd-btn-outline-dark:hover,.sd-btn-outline-dark:focus{color:var(--sd-color-dark-text) !important;background-color:var(--sd-color-dark) !important;border-color:var(--sd-color-dark) !important;border-width:1px !important;border-style:solid !important}.sd-btn-dark:hover,.sd-btn-dark:focus{color:var(--sd-color-dark-text) !important;background-color:var(--sd-color-dark-highlight) !important;border-color:var(--sd-color-dark-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-dark{color:var(--sd-color-dark) !important;border-color:var(--sd-color-dark) !important;border-width:1px !important;border-style:solid !important}.sd-btn-black,.sd-btn-outline-black:hover,.sd-btn-outline-black:focus{color:var(--sd-color-black-text) !important;background-color:var(--sd-color-black) !important;border-color:var(--sd-color-black) !important;border-width:1px !important;border-style:solid !important}.sd-btn-black:hover,.sd-btn-black:focus{color:var(--sd-color-black-text) !important;background-color:var(--sd-color-black-highlight) 
!important;border-color:var(--sd-color-black-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-black{color:var(--sd-color-black) !important;border-color:var(--sd-color-black) !important;border-width:1px !important;border-style:solid !important}.sd-btn-white,.sd-btn-outline-white:hover,.sd-btn-outline-white:focus{color:var(--sd-color-white-text) !important;background-color:var(--sd-color-white) !important;border-color:var(--sd-color-white) !important;border-width:1px !important;border-style:solid !important}.sd-btn-white:hover,.sd-btn-white:focus{color:var(--sd-color-white-text) !important;background-color:var(--sd-color-white-highlight) !important;border-color:var(--sd-color-white-highlight) !important;border-width:1px !important;border-style:solid !important}.sd-btn-outline-white{color:var(--sd-color-white) !important;border-color:var(--sd-color-white) !important;border-width:1px !important;border-style:solid !important}.sd-stretched-link::after{position:absolute;top:0;right:0;bottom:0;left:0;z-index:1;content:""}.sd-hide-link-text{font-size:0}.sd-octicon,.sd-material-icon{display:inline-block;fill:currentColor;vertical-align:middle}.sd-avatar-xs{border-radius:50%;object-fit:cover;object-position:center;width:1rem;height:1rem}.sd-avatar-sm{border-radius:50%;object-fit:cover;object-position:center;width:3rem;height:3rem}.sd-avatar-md{border-radius:50%;object-fit:cover;object-position:center;width:5rem;height:5rem}.sd-avatar-lg{border-radius:50%;object-fit:cover;object-position:center;width:7rem;height:7rem}.sd-avatar-xl{border-radius:50%;object-fit:cover;object-position:center;width:10rem;height:10rem}.sd-avatar-inherit{border-radius:50%;object-fit:cover;object-position:center;width:inherit;height:inherit}.sd-avatar-initial{border-radius:50%;object-fit:cover;object-position:center;width:initial;height:initial}.sd-card{background-clip:border-box;background-color:var(--sd-color-card-background);border:1px solid var(--sd-color-card-border);border-radius:.25rem;color:var(--sd-color-card-text);display:-ms-flexbox;display:flex;-ms-flex-direction:column;flex-direction:column;min-width:0;position:relative;word-wrap:break-word}.sd-card>hr{margin-left:0;margin-right:0}.sd-card-hover:hover{border-color:var(--sd-color-card-border-hover);transform:scale(1.01)}.sd-card-body{-ms-flex:1 1 auto;flex:1 1 auto;padding:1rem 1rem}.sd-card-title{margin-bottom:.5rem}.sd-card-subtitle{margin-top:-0.25rem;margin-bottom:0}.sd-card-text:last-child{margin-bottom:0}.sd-card-link:hover{text-decoration:none}.sd-card-link+.card-link{margin-left:1rem}.sd-card-header{padding:.5rem 1rem;margin-bottom:0;background-color:var(--sd-color-card-header);border-bottom:1px solid var(--sd-color-card-border)}.sd-card-header:first-child{border-radius:calc(0.25rem - 1px) calc(0.25rem - 1px) 0 0}.sd-card-footer{padding:.5rem 1rem;background-color:var(--sd-color-card-footer);border-top:1px solid var(--sd-color-card-border)}.sd-card-footer:last-child{border-radius:0 0 calc(0.25rem - 1px) calc(0.25rem - 1px)}.sd-card-header-tabs{margin-right:-0.5rem;margin-bottom:-0.5rem;margin-left:-0.5rem;border-bottom:0}.sd-card-header-pills{margin-right:-0.5rem;margin-left:-0.5rem}.sd-card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:1rem;border-radius:calc(0.25rem - 1px)}.sd-card-img,.sd-card-img-bottom,.sd-card-img-top{width:100%}.sd-card-img,.sd-card-img-top{border-top-left-radius:calc(0.25rem - 1px);border-top-right-radius:calc(0.25rem - 
1px)}.sd-card-img,.sd-card-img-bottom{border-bottom-left-radius:calc(0.25rem - 1px);border-bottom-right-radius:calc(0.25rem - 1px)}.sd-cards-carousel{width:100%;display:flex;flex-wrap:nowrap;-ms-flex-direction:row;flex-direction:row;overflow-x:hidden;scroll-snap-type:x mandatory}.sd-cards-carousel.sd-show-scrollbar{overflow-x:auto}.sd-cards-carousel:hover,.sd-cards-carousel:focus{overflow-x:auto}.sd-cards-carousel>.sd-card{flex-shrink:0;scroll-snap-align:start}.sd-cards-carousel>.sd-card:not(:last-child){margin-right:3px}.sd-card-cols-1>.sd-card{width:90%}.sd-card-cols-2>.sd-card{width:45%}.sd-card-cols-3>.sd-card{width:30%}.sd-card-cols-4>.sd-card{width:22.5%}.sd-card-cols-5>.sd-card{width:18%}.sd-card-cols-6>.sd-card{width:15%}.sd-card-cols-7>.sd-card{width:12.8571428571%}.sd-card-cols-8>.sd-card{width:11.25%}.sd-card-cols-9>.sd-card{width:10%}.sd-card-cols-10>.sd-card{width:9%}.sd-card-cols-11>.sd-card{width:8.1818181818%}.sd-card-cols-12>.sd-card{width:7.5%}.sd-container,.sd-container-fluid,.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container-xl{margin-left:auto;margin-right:auto;padding-left:var(--sd-gutter-x, 0.75rem);padding-right:var(--sd-gutter-x, 0.75rem);width:100%}@media(min-width: 576px){.sd-container-sm,.sd-container{max-width:540px}}@media(min-width: 768px){.sd-container-md,.sd-container-sm,.sd-container{max-width:720px}}@media(min-width: 992px){.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container{max-width:960px}}@media(min-width: 1200px){.sd-container-xl,.sd-container-lg,.sd-container-md,.sd-container-sm,.sd-container{max-width:1140px}}.sd-row{--sd-gutter-x: 1.5rem;--sd-gutter-y: 0;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;margin-top:calc(var(--sd-gutter-y) * -1);margin-right:calc(var(--sd-gutter-x) * -0.5);margin-left:calc(var(--sd-gutter-x) * -0.5)}.sd-row>*{box-sizing:border-box;flex-shrink:0;width:100%;max-width:100%;padding-right:calc(var(--sd-gutter-x) * 0.5);padding-left:calc(var(--sd-gutter-x) * 0.5);margin-top:var(--sd-gutter-y)}.sd-col{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-auto>*{flex:0 0 auto;width:auto}.sd-row-cols-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}@media(min-width: 576px){.sd-col-sm{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-sm-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-sm-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-sm-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-sm-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-sm-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-sm-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-sm-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-sm-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-sm-8>*{flex:0 0 auto;-ms-flex:0 0 
auto;width:12.5%}.sd-row-cols-sm-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-sm-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-sm-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-sm-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 768px){.sd-col-md{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-md-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-md-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-md-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-md-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-md-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-md-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-md-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-md-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-md-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-md-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-md-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-md-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-md-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 992px){.sd-col-lg{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-lg-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-lg-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-lg-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-lg-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-lg-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-lg-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-lg-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-lg-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-lg-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-lg-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-lg-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-lg-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-lg-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}@media(min-width: 1200px){.sd-col-xl{flex:1 0 0%;-ms-flex:1 0 0%}.sd-row-cols-xl-auto{flex:1 0 auto;-ms-flex:1 0 auto;width:100%}.sd-row-cols-xl-1>*{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-row-cols-xl-2>*{flex:0 0 auto;-ms-flex:0 0 auto;width:50%}.sd-row-cols-xl-3>*{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-row-cols-xl-4>*{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-row-cols-xl-5>*{flex:0 0 auto;-ms-flex:0 0 auto;width:20%}.sd-row-cols-xl-6>*{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-row-cols-xl-7>*{flex:0 0 auto;-ms-flex:0 0 auto;width:14.2857142857%}.sd-row-cols-xl-8>*{flex:0 0 auto;-ms-flex:0 0 auto;width:12.5%}.sd-row-cols-xl-9>*{flex:0 0 auto;-ms-flex:0 0 auto;width:11.1111111111%}.sd-row-cols-xl-10>*{flex:0 0 auto;-ms-flex:0 0 auto;width:10%}.sd-row-cols-xl-11>*{flex:0 0 auto;-ms-flex:0 0 auto;width:9.0909090909%}.sd-row-cols-xl-12>*{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}}.sd-col-auto{flex:0 0 auto;-ms-flex:0 0 auto;width:auto}.sd-col-1{flex:0 0 auto;-ms-flex:0 0 auto;width:8.3333333333%}.sd-col-2{flex:0 0 auto;-ms-flex:0 0 auto;width:16.6666666667%}.sd-col-3{flex:0 0 auto;-ms-flex:0 0 auto;width:25%}.sd-col-4{flex:0 0 auto;-ms-flex:0 0 auto;width:33.3333333333%}.sd-col-5{flex:0 0 auto;-ms-flex:0 0 auto;width:41.6666666667%}.sd-col-6{flex:0 0 auto;-ms-flex:0 0 
auto;width:50%}.sd-col-7{flex:0 0 auto;-ms-flex:0 0 auto;width:58.3333333333%}.sd-col-8{flex:0 0 auto;-ms-flex:0 0 auto;width:66.6666666667%}.sd-col-9{flex:0 0 auto;-ms-flex:0 0 auto;width:75%}.sd-col-10{flex:0 0 auto;-ms-flex:0 0 auto;width:83.3333333333%}.sd-col-11{flex:0 0 auto;-ms-flex:0 0 auto;width:91.6666666667%}.sd-col-12{flex:0 0 auto;-ms-flex:0 0 auto;width:100%}.sd-g-0,.sd-gy-0{--sd-gutter-y: 0}.sd-g-0,.sd-gx-0{--sd-gutter-x: 0}.sd-g-1,.sd-gy-1{--sd-gutter-y: 0.25rem}.sd-g-1,.sd-gx-1{--sd-gutter-x: 0.25rem}.sd-g-2,.sd-gy-2{--sd-gutter-y: 0.5rem}.sd-g-2,.sd-gx-2{--sd-gutter-x: 0.5rem}.sd-g-3,.sd-gy-3{--sd-gutter-y: 1rem}.sd-g-3,.sd-gx-3{--sd-gutter-x: 1rem}.sd-g-4,.sd-gy-4{--sd-gutter-y: 1.5rem}.sd-g-4,.sd-gx-4{--sd-gutter-x: 1.5rem}.sd-g-5,.sd-gy-5{--sd-gutter-y: 3rem}.sd-g-5,.sd-gx-5{--sd-gutter-x: 3rem}@media(min-width: 576px){.sd-col-sm-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-sm-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-sm-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-sm-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-sm-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-sm-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-sm-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-sm-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-sm-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-sm-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-sm-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-sm-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-sm-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-sm-0,.sd-gy-sm-0{--sd-gutter-y: 0}.sd-g-sm-0,.sd-gx-sm-0{--sd-gutter-x: 0}.sd-g-sm-1,.sd-gy-sm-1{--sd-gutter-y: 0.25rem}.sd-g-sm-1,.sd-gx-sm-1{--sd-gutter-x: 0.25rem}.sd-g-sm-2,.sd-gy-sm-2{--sd-gutter-y: 0.5rem}.sd-g-sm-2,.sd-gx-sm-2{--sd-gutter-x: 0.5rem}.sd-g-sm-3,.sd-gy-sm-3{--sd-gutter-y: 1rem}.sd-g-sm-3,.sd-gx-sm-3{--sd-gutter-x: 1rem}.sd-g-sm-4,.sd-gy-sm-4{--sd-gutter-y: 1.5rem}.sd-g-sm-4,.sd-gx-sm-4{--sd-gutter-x: 1.5rem}.sd-g-sm-5,.sd-gy-sm-5{--sd-gutter-y: 3rem}.sd-g-sm-5,.sd-gx-sm-5{--sd-gutter-x: 3rem}}@media(min-width: 768px){.sd-col-md-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-md-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-md-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-md-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-md-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-md-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-md-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-md-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-md-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-md-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-md-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-md-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-md-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-md-0,.sd-gy-md-0{--sd-gutter-y: 0}.sd-g-md-0,.sd-gx-md-0{--sd-gutter-x: 0}.sd-g-md-1,.sd-gy-md-1{--sd-gutter-y: 0.25rem}.sd-g-md-1,.sd-gx-md-1{--sd-gutter-x: 0.25rem}.sd-g-md-2,.sd-gy-md-2{--sd-gutter-y: 0.5rem}.sd-g-md-2,.sd-gx-md-2{--sd-gutter-x: 0.5rem}.sd-g-md-3,.sd-gy-md-3{--sd-gutter-y: 1rem}.sd-g-md-3,.sd-gx-md-3{--sd-gutter-x: 1rem}.sd-g-md-4,.sd-gy-md-4{--sd-gutter-y: 1.5rem}.sd-g-md-4,.sd-gx-md-4{--sd-gutter-x: 1.5rem}.sd-g-md-5,.sd-gy-md-5{--sd-gutter-y: 3rem}.sd-g-md-5,.sd-gx-md-5{--sd-gutter-x: 
3rem}}@media(min-width: 992px){.sd-col-lg-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-lg-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-lg-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-lg-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-lg-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-lg-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-lg-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-lg-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-lg-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-lg-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-lg-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-lg-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-lg-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-lg-0,.sd-gy-lg-0{--sd-gutter-y: 0}.sd-g-lg-0,.sd-gx-lg-0{--sd-gutter-x: 0}.sd-g-lg-1,.sd-gy-lg-1{--sd-gutter-y: 0.25rem}.sd-g-lg-1,.sd-gx-lg-1{--sd-gutter-x: 0.25rem}.sd-g-lg-2,.sd-gy-lg-2{--sd-gutter-y: 0.5rem}.sd-g-lg-2,.sd-gx-lg-2{--sd-gutter-x: 0.5rem}.sd-g-lg-3,.sd-gy-lg-3{--sd-gutter-y: 1rem}.sd-g-lg-3,.sd-gx-lg-3{--sd-gutter-x: 1rem}.sd-g-lg-4,.sd-gy-lg-4{--sd-gutter-y: 1.5rem}.sd-g-lg-4,.sd-gx-lg-4{--sd-gutter-x: 1.5rem}.sd-g-lg-5,.sd-gy-lg-5{--sd-gutter-y: 3rem}.sd-g-lg-5,.sd-gx-lg-5{--sd-gutter-x: 3rem}}@media(min-width: 1200px){.sd-col-xl-auto{-ms-flex:0 0 auto;flex:0 0 auto;width:auto}.sd-col-xl-1{-ms-flex:0 0 auto;flex:0 0 auto;width:8.3333333333%}.sd-col-xl-2{-ms-flex:0 0 auto;flex:0 0 auto;width:16.6666666667%}.sd-col-xl-3{-ms-flex:0 0 auto;flex:0 0 auto;width:25%}.sd-col-xl-4{-ms-flex:0 0 auto;flex:0 0 auto;width:33.3333333333%}.sd-col-xl-5{-ms-flex:0 0 auto;flex:0 0 auto;width:41.6666666667%}.sd-col-xl-6{-ms-flex:0 0 auto;flex:0 0 auto;width:50%}.sd-col-xl-7{-ms-flex:0 0 auto;flex:0 0 auto;width:58.3333333333%}.sd-col-xl-8{-ms-flex:0 0 auto;flex:0 0 auto;width:66.6666666667%}.sd-col-xl-9{-ms-flex:0 0 auto;flex:0 0 auto;width:75%}.sd-col-xl-10{-ms-flex:0 0 auto;flex:0 0 auto;width:83.3333333333%}.sd-col-xl-11{-ms-flex:0 0 auto;flex:0 0 auto;width:91.6666666667%}.sd-col-xl-12{-ms-flex:0 0 auto;flex:0 0 auto;width:100%}.sd-g-xl-0,.sd-gy-xl-0{--sd-gutter-y: 0}.sd-g-xl-0,.sd-gx-xl-0{--sd-gutter-x: 0}.sd-g-xl-1,.sd-gy-xl-1{--sd-gutter-y: 0.25rem}.sd-g-xl-1,.sd-gx-xl-1{--sd-gutter-x: 0.25rem}.sd-g-xl-2,.sd-gy-xl-2{--sd-gutter-y: 0.5rem}.sd-g-xl-2,.sd-gx-xl-2{--sd-gutter-x: 0.5rem}.sd-g-xl-3,.sd-gy-xl-3{--sd-gutter-y: 1rem}.sd-g-xl-3,.sd-gx-xl-3{--sd-gutter-x: 1rem}.sd-g-xl-4,.sd-gy-xl-4{--sd-gutter-y: 1.5rem}.sd-g-xl-4,.sd-gx-xl-4{--sd-gutter-x: 1.5rem}.sd-g-xl-5,.sd-gy-xl-5{--sd-gutter-y: 3rem}.sd-g-xl-5,.sd-gx-xl-5{--sd-gutter-x: 3rem}}.sd-flex-row-reverse{flex-direction:row-reverse !important}details.sd-dropdown{position:relative}details.sd-dropdown .sd-summary-title{font-weight:700;padding-right:3em !important;-moz-user-select:none;-ms-user-select:none;-webkit-user-select:none;user-select:none}details.sd-dropdown:hover{cursor:pointer}details.sd-dropdown .sd-summary-content{cursor:default}details.sd-dropdown summary{list-style:none;padding:1em}details.sd-dropdown summary .sd-octicon.no-title{vertical-align:middle}details.sd-dropdown[open] summary .sd-octicon.no-title{visibility:hidden}details.sd-dropdown summary::-webkit-details-marker{display:none}details.sd-dropdown summary:focus{outline:none}details.sd-dropdown .sd-summary-icon{margin-right:.5em}details.sd-dropdown .sd-summary-icon svg{opacity:.8}details.sd-dropdown summary:hover 
.sd-summary-up svg,details.sd-dropdown summary:hover .sd-summary-down svg{opacity:1;transform:scale(1.1)}details.sd-dropdown .sd-summary-up svg,details.sd-dropdown .sd-summary-down svg{display:block;opacity:.6}details.sd-dropdown .sd-summary-up,details.sd-dropdown .sd-summary-down{pointer-events:none;position:absolute;right:1em;top:1em}details.sd-dropdown[open]>.sd-summary-title .sd-summary-down{visibility:hidden}details.sd-dropdown:not([open])>.sd-summary-title .sd-summary-up{visibility:hidden}details.sd-dropdown:not([open]).sd-card{border:none}details.sd-dropdown:not([open])>.sd-card-header{border:1px solid var(--sd-color-card-border);border-radius:.25rem}details.sd-dropdown.sd-fade-in[open] summary~*{-moz-animation:sd-fade-in .5s ease-in-out;-webkit-animation:sd-fade-in .5s ease-in-out;animation:sd-fade-in .5s ease-in-out}details.sd-dropdown.sd-fade-in-slide-down[open] summary~*{-moz-animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out;-webkit-animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out;animation:sd-fade-in .5s ease-in-out,sd-slide-down .5s ease-in-out}.sd-col>.sd-dropdown{width:100%}.sd-summary-content>.sd-tab-set:first-child{margin-top:0}@keyframes sd-fade-in{0%{opacity:0}100%{opacity:1}}@keyframes sd-slide-down{0%{transform:translate(0, -10px)}100%{transform:translate(0, 0)}}.sd-tab-set{border-radius:.125rem;display:flex;flex-wrap:wrap;margin:1em 0;position:relative}.sd-tab-set>input{opacity:0;position:absolute}.sd-tab-set>input:checked+label{border-color:var(--sd-color-tabs-underline-active);color:var(--sd-color-tabs-label-active)}.sd-tab-set>input:checked+label+.sd-tab-content{display:block}.sd-tab-set>input:not(:checked)+label:hover{color:var(--sd-color-tabs-label-hover);border-color:var(--sd-color-tabs-underline-hover)}.sd-tab-set>input:focus+label{outline-style:auto}.sd-tab-set>input:not(.focus-visible)+label{outline:none;-webkit-tap-highlight-color:transparent}.sd-tab-set>label{border-bottom:.125rem solid transparent;margin-bottom:0;color:var(--sd-color-tabs-label-inactive);border-color:var(--sd-color-tabs-underline-inactive);cursor:pointer;font-size:var(--sd-fontsize-tabs-label);font-weight:700;padding:1em 1.25em .5em;transition:color 250ms;width:auto;z-index:1}html .sd-tab-set>label:hover{color:var(--sd-color-tabs-label-active)}.sd-col>.sd-tab-set{width:100%}.sd-tab-content{box-shadow:0 -0.0625rem var(--sd-color-tabs-overline),0 .0625rem var(--sd-color-tabs-underline);display:none;order:99;padding-bottom:.75rem;padding-top:.75rem;width:100%}.sd-tab-content>:first-child{margin-top:0 !important}.sd-tab-content>:last-child{margin-bottom:0 !important}.sd-tab-content>.sd-tab-set{margin:0}.sd-sphinx-override,.sd-sphinx-override *{-moz-box-sizing:border-box;-webkit-box-sizing:border-box;box-sizing:border-box}.sd-sphinx-override p{margin-top:0}:root{--sd-color-primary: #007bff;--sd-color-secondary: #6c757d;--sd-color-success: #28a745;--sd-color-info: #17a2b8;--sd-color-warning: #f0b37e;--sd-color-danger: #dc3545;--sd-color-light: #f8f9fa;--sd-color-muted: #6c757d;--sd-color-dark: #212529;--sd-color-black: black;--sd-color-white: white;--sd-color-primary-highlight: #0069d9;--sd-color-secondary-highlight: #5c636a;--sd-color-success-highlight: #228e3b;--sd-color-info-highlight: #148a9c;--sd-color-warning-highlight: #cc986b;--sd-color-danger-highlight: #bb2d3b;--sd-color-light-highlight: #d3d4d5;--sd-color-muted-highlight: #5c636a;--sd-color-dark-highlight: #1c1f23;--sd-color-black-highlight: black;--sd-color-white-highlight: 
#d9d9d9;--sd-color-primary-text: #fff;--sd-color-secondary-text: #fff;--sd-color-success-text: #fff;--sd-color-info-text: #fff;--sd-color-warning-text: #212529;--sd-color-danger-text: #fff;--sd-color-light-text: #212529;--sd-color-muted-text: #fff;--sd-color-dark-text: #fff;--sd-color-black-text: #fff;--sd-color-white-text: #212529;--sd-color-shadow: rgba(0, 0, 0, 0.15);--sd-color-card-border: rgba(0, 0, 0, 0.125);--sd-color-card-border-hover: hsla(231, 99%, 66%, 1);--sd-color-card-background: transparent;--sd-color-card-text: inherit;--sd-color-card-header: transparent;--sd-color-card-footer: transparent;--sd-color-tabs-label-active: hsla(231, 99%, 66%, 1);--sd-color-tabs-label-hover: hsla(231, 99%, 66%, 1);--sd-color-tabs-label-inactive: hsl(0, 0%, 66%);--sd-color-tabs-underline-active: hsla(231, 99%, 66%, 1);--sd-color-tabs-underline-hover: rgba(178, 206, 245, 0.62);--sd-color-tabs-underline-inactive: transparent;--sd-color-tabs-overline: rgb(222, 222, 222);--sd-color-tabs-underline: rgb(222, 222, 222);--sd-fontsize-tabs-label: 1rem} diff --git a/_static/design-tabs.js b/_static/design-tabs.js new file mode 100644 index 0000000000..36b38cf0d9 --- /dev/null +++ b/_static/design-tabs.js @@ -0,0 +1,27 @@ +var sd_labels_by_text = {}; + +function ready() { + const li = document.getElementsByClassName("sd-tab-label"); + for (const label of li) { + syncId = label.getAttribute("data-sync-id"); + if (syncId) { + label.onclick = onLabelClick; + if (!sd_labels_by_text[syncId]) { + sd_labels_by_text[syncId] = []; + } + sd_labels_by_text[syncId].push(label); + } + } +} + +function onLabelClick() { + // Activate other inputs with the same sync id. + syncId = this.getAttribute("data-sync-id"); + for (label of sd_labels_by_text[syncId]) { + if (label === this) continue; + label.previousElementSibling.checked = true; + } + window.localStorage.setItem("sphinx-design-last-tab", syncId); +} + +document.addEventListener("DOMContentLoaded", ready, false); diff --git a/_static/doctools.js b/_static/doctools.js new file mode 100644 index 0000000000..c3db08d1c3 --- /dev/null +++ b/_static/doctools.js @@ -0,0 +1,264 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Base JavaScript utilities for all Sphinx HTML documentation. + * + * :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); + } +}; + +/** + * highlight a given string on a node by wrapping it in + * span elements with the given class name. 
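[Annotation, not part of the diff: a minimal sketch of how the _highlightText helper defined just below is driven; highlightSearchWords further down calls it once per term taken from the ?highlight= query parameter. The container element and search term here are hypothetical.]

const demo = document.createElement("div");
demo.innerHTML = "<p>Widget tests and widget docs</p>";
// Callers must lowercase the term themselves, because _highlight compares it
// against node.nodeValue.toLowerCase().
_highlightText(demo, "widget", "highlighted");
// Matched text inside `demo` is now wrapped in <span class="highlighted">
// elements, preserving the original casing of the document text.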
+ */ +const _highlight = (node, addItems, text, className) => { + if (node.nodeType === Node.TEXT_NODE) { + const val = node.nodeValue; + const parent = node.parentNode; + const pos = val.toLowerCase().indexOf(text); + if ( + pos >= 0 && + !parent.classList.contains(className) && + !parent.classList.contains("nohighlight") + ) { + let span; + + const closestNode = parent.closest("body, svg, foreignObject"); + const isInSVG = closestNode && closestNode.matches("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.classList.add(className); + } + + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + parent.insertBefore( + span, + parent.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling + ) + ); + node.nodeValue = val.substr(0, pos); + + if (isInSVG) { + const rect = document.createElementNS( + "http://www.w3.org/2000/svg", + "rect" + ); + const bbox = parent.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute("class", className); + addItems.push({ parent: parent, target: rect }); + } + } + } else if (node.matches && !node.matches("button, select, textarea")) { + node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); + } +}; +const _highlightText = (thisNode, text, className) => { + let addItems = []; + _highlight(thisNode, addItems, text, className); + addItems.forEach((obj) => + obj.parent.insertAdjacentElement("beforebegin", obj.target) + ); +}; + +/** + * Small JavaScript module for the documentation. + */ +const Documentation = { + init: () => { + Documentation.highlightSearchWords(); + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); + }, + + /** + * i18n support + */ + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ? 0 : 1), + LOCALE: "unknown", + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } + }, + + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? singular : plural; + }, + + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; + }, + + /** + * highlight the search words provided in the url in the text + */ + highlightSearchWords: () => { + const highlight = + new URLSearchParams(window.location.search).get("highlight") || ""; + const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); + if (terms.length === 0) return; // nothing to do + + // There should never be more than one element matching "div.body" + const divBody = document.querySelectorAll("div.body"); + const body = divBody.length ? 
divBody[0] : document.querySelector("body"); + window.setTimeout(() => { + terms.forEach((term) => _highlightText(body, term, "highlighted")); + }, 10); + + const searchBox = document.getElementById("searchbox"); + if (searchBox === null) return; + searchBox.appendChild( + document + .createRange() + .createContextualFragment( + '<p class="highlight-link">' + + '<a href="javascript:Documentation.hideSearchWords()">' + + Documentation.gettext("Hide Search Matches") + + "</a></p>" + ) + ); + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords: () => { + document + .querySelectorAll("#searchbox .highlight-link") + .forEach((el) => el.remove()); + document + .querySelectorAll("span.highlighted") + .forEach((el) => el.classList.remove("highlighted")); + const url = new URL(window.location); + url.searchParams.delete("highlight"); + window.history.replaceState({}, "", url); + }, + + /** + * helper function to focus on search bar + */ + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); + }, + + /** + * Initialise the domain index toggle buttons + */ + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); + } + }; + + const togglerElements = document.querySelectorAll("img.toggler"); + togglerElements.forEach((el) => + el.addEventListener("click", (event) => toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); + }, + + initOnKeyListeners: () => { + // only install a listener if it is really needed + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; + + const blacklistedElements = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", + ]); + document.addEventListener("keydown", (event) => { + if (blacklistedElements.has(document.activeElement.tagName)) return; // bail for input elements + if (event.altKey || event.ctrlKey || event.metaKey) return; // bail with special keys + + if (!event.shiftKey) { + switch (event.key) { + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + event.preventDefault(); + } + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); + } + break; + case "Escape": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.hideSearchWords(); + event.preventDefault(); + } + } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } + }); + }, +}; + +// quick alias for translations +const _ = Documentation.gettext; + +_ready(Documentation.init); diff --git a/_static/documentation_options.js b/_static/documentation_options.js new file mode 100644 index 0000000000..162a6ba8d8 --- /dev/null +++ b/_static/documentation_options.js @@ -0,0 +1,14 @@ +var
DOCUMENTATION_OPTIONS = { + URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), + VERSION: '', + LANGUAGE: 'en', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '', + NAVIGATION_WITH_KEYS: false, + SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: false, +}; \ No newline at end of file diff --git a/_static/file.png b/_static/file.png new file mode 100644 index 0000000000..a858a410e4 Binary files /dev/null and b/_static/file.png differ diff --git a/_static/images/logo_binder.svg b/_static/images/logo_binder.svg new file mode 100644 index 0000000000..45fecf7511 --- /dev/null +++ b/_static/images/logo_binder.svg @@ -0,0 +1,19 @@ +[19 lines of SVG markup lost in extraction; only the title text "logo" survives] diff --git a/_static/images/logo_colab.png b/_static/images/logo_colab.png new file mode 100644 index 0000000000..b7560ec216 Binary files /dev/null and b/_static/images/logo_colab.png differ diff --git a/_static/images/logo_deepnote.svg b/_static/images/logo_deepnote.svg new file mode 100644 index 0000000000..fa77ebfc25 --- /dev/null +++ b/_static/images/logo_deepnote.svg @@ -0,0 +1 @@ +[SVG markup lost in extraction] diff --git a/_static/images/logo_jupyterhub.svg b/_static/images/logo_jupyterhub.svg new file mode 100644 index 0000000000..60cfe9f222 --- /dev/null +++ b/_static/images/logo_jupyterhub.svg @@ -0,0 +1 @@ +[SVG markup lost in extraction; surviving text: "logo_jupyterhub", "Hub"] diff --git a/_static/jquery-3.6.0.js b/_static/jquery-3.6.0.js new file mode 100644 index 0000000000..fc6c299b73 --- /dev/null +++ b/_static/jquery-3.6.0.js @@ -0,0 +1,10881 @@ +/*! + * jQuery JavaScript Library v3.6.0 + * https://jquery.com/ + * + * Includes Sizzle.js + * https://sizzlejs.com/ + * + * Copyright OpenJS Foundation and other contributors + * Released under the MIT license + * https://jquery.org/license + * + * Date: 2021-03-02T17:08Z + */ +( function( global, factory ) { + + "use strict"; + + if ( typeof module === "object" && typeof module.exports === "object" ) { + + // For CommonJS and CommonJS-like environments where a proper `window` + // is present, execute the factory and get jQuery. + // For environments that do not have a `window` with a `document` + // (such as Node.js), expose a factory as module.exports. + // This accentuates the need for the creation of a real `window`. + // e.g. var jQuery = require("jquery")(window); + // See ticket #14549 for more info. + module.exports = global.document ? + factory( global, true ) : + function( w ) { + if ( !w.document ) { + throw new Error( "jQuery requires a window with a document" ); + } + return factory( w ); + }; + } else { + factory( global ); + } + +// Pass this if window is not defined yet +} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) { + +// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1 +// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode +// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common +// enough that all such attempts are guarded in a try block. +"use strict"; + +var arr = []; + +var getProto = Object.getPrototypeOf; + +var slice = arr.slice; + +var flat = arr.flat ?
function( array ) { + return arr.flat.call( array ); +} : function( array ) { + return arr.concat.apply( [], array ); +}; + + +var push = arr.push; + +var indexOf = arr.indexOf; + +var class2type = {}; + +var toString = class2type.toString; + +var hasOwn = class2type.hasOwnProperty; + +var fnToString = hasOwn.toString; + +var ObjectFunctionString = fnToString.call( Object ); + +var support = {}; + +var isFunction = function isFunction( obj ) { + + // Support: Chrome <=57, Firefox <=52 + // In some browsers, typeof returns "function" for HTML elements + // (i.e., `typeof document.createElement( "object" ) === "function"`). + // We don't want to classify *any* DOM node as a function. + // Support: QtWeb <=3.8.5, WebKit <=534.34, wkhtmltopdf tool <=0.12.5 + // Plus for old WebKit, typeof returns "function" for HTML collections + // (e.g., `typeof document.getElementsByTagName("div") === "function"`). (gh-4756) + return typeof obj === "function" && typeof obj.nodeType !== "number" && + typeof obj.item !== "function"; + }; + + +var isWindow = function isWindow( obj ) { + return obj != null && obj === obj.window; + }; + + +var document = window.document; + + + + var preservedScriptAttributes = { + type: true, + src: true, + nonce: true, + noModule: true + }; + + function DOMEval( code, node, doc ) { + doc = doc || document; + + var i, val, + script = doc.createElement( "script" ); + + script.text = code; + if ( node ) { + for ( i in preservedScriptAttributes ) { + + // Support: Firefox 64+, Edge 18+ + // Some browsers don't support the "nonce" property on scripts. + // On the other hand, just using `getAttribute` is not enough as + // the `nonce` attribute is reset to an empty string whenever it + // becomes browsing-context connected. + // See https://github.com/whatwg/html/issues/2369 + // See https://html.spec.whatwg.org/#nonce-attributes + // The `node.getAttribute` check was added for the sake of + // `jQuery.globalEval` so that it can fake a nonce-containing node + // via an object. + val = node[ i ] || node.getAttribute && node.getAttribute( i ); + if ( val ) { + script.setAttribute( i, val ); + } + } + } + doc.head.appendChild( script ).parentNode.removeChild( script ); + } + + +function toType( obj ) { + if ( obj == null ) { + return obj + ""; + } + + // Support: Android <=2.3 only (functionish RegExp) + return typeof obj === "object" || typeof obj === "function" ? + class2type[ toString.call( obj ) ] || "object" : + typeof obj; +} +/* global Symbol */ +// Defining this global in .eslintrc.json would create a danger of using the global +// unguarded in another place, it seems safer to define global only for this module + + + +var + version = "3.6.0", + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + + // The jQuery object is actually just the init constructor 'enhanced' + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); + }; + +jQuery.fn = jQuery.prototype = { + + // The current version of jQuery being used + jquery: version, + + constructor: jQuery, + + // The default length of a jQuery object is 0 + length: 0, + + toArray: function() { + return slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + + // Return all the elements in a clean array + if ( num == null ) { + return slice.call( this ); + } + + // Return just the one element from the set + return num < 0 ? 
this[ num + this.length ] : this[ num ]; + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. + each: function( callback ) { + return jQuery.each( this, callback ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map( this, function( elem, i ) { + return callback.call( elem, i, elem ); + } ) ); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ) ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + even: function() { + return this.pushStack( jQuery.grep( this, function( _elem, i ) { + return ( i + 1 ) % 2; + } ) ); + }, + + odd: function() { + return this.pushStack( jQuery.grep( this, function( _elem, i ) { + return i % 2; + } ) ); + }, + + eq: function( i ) { + var len = this.length, + j = +i + ( i < 0 ? len : 0 ); + return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. + push: push, + sort: arr.sort, + splice: arr.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var options, name, src, copy, copyIsArray, clone, + target = arguments[ 0 ] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // Skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !isFunction( target ) ) { + target = {}; + } + + // Extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + + // Only deal with non-null/undefined values + if ( ( options = arguments[ i ] ) != null ) { + + // Extend the base object + for ( name in options ) { + copy = options[ name ]; + + // Prevent Object.prototype pollution + // Prevent never-ending loop + if ( name === "__proto__" || target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject( copy ) || + ( copyIsArray = Array.isArray( copy ) ) ) ) { + src = target[ name ]; + + // Ensure proper type for the source value + if ( copyIsArray && !Array.isArray( src ) ) { + clone = []; + } else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) { + clone = {}; + } else { + clone = src; + } + copyIsArray = false; + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend( { + + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new Error( msg ); + }, + + noop: function() {}, + + isPlainObject: function( 
obj ) { + var proto, Ctor; + + // Detect obvious negatives + // Use toString instead of jQuery.type to catch host objects + if ( !obj || toString.call( obj ) !== "[object Object]" ) { + return false; + } + + proto = getProto( obj ); + + // Objects with no prototype (e.g., `Object.create( null )`) are plain + if ( !proto ) { + return true; + } + + // Objects with prototype are plain iff they were constructed by a global Object function + Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor; + return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString; + }, + + isEmptyObject: function( obj ) { + var name; + + for ( name in obj ) { + return false; + } + return true; + }, + + // Evaluates a script in a provided context; falls back to the global one + // if not specified. + globalEval: function( code, options, doc ) { + DOMEval( code, { nonce: options && options.nonce }, doc ); + }, + + each: function( obj, callback ) { + var length, i = 0; + + if ( isArrayLike( obj ) ) { + length = obj.length; + for ( ; i < length; i++ ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } else { + for ( i in obj ) { + if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) { + break; + } + } + } + + return obj; + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArrayLike( Object( arr ) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? + [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + return arr == null ? -1 : indexOf.call( arr, elem, i ); + }, + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + for ( ; j < len; j++ ) { + first[ i++ ] = second[ j ]; + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var length, value, + i = 0, + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArrayLike( elems ) ) { + length = elems.length; + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + + // Go through every key on the object, + } else { + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return flat( ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. 
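[Annotation, not part of the bundled file: the collection utilities defined above have a few behaviors worth pinning down. A short sketch, assuming this jQuery build is loaded:]

var nums = [ 1, 2, 3, 4 ];

// grep keeps the items the callback approves; the third argument inverts the test.
jQuery.grep( nums, function( n ) { return n % 2 === 0; } );       // [ 2, 4 ]
jQuery.grep( nums, function( n ) { return n % 2 === 0; }, true ); // [ 1, 3 ]

// map drops null/undefined results and flattens one level of nesting via flat().
jQuery.map( nums, function( n ) { return n > 2 ? [ n, n * 10 ] : null; } );
// => [ 3, 30, 4, 40 ]

// inArray is an indexOf that tolerates a null/undefined array.
jQuery.inArray( 3, nums ); // 2
jQuery.inArray( 3, null ); // -1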
+ support: support +} ); + +if ( typeof Symbol === "function" ) { + jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ]; +} + +// Populate the class2type map +jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ), + function( _i, name ) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); + } ); + +function isArrayLike( obj ) { + + // Support: real iOS 8.2 only (not reproducible in simulator) + // `in` check used to prevent JIT error (gh-2145) + // hasOwn isn't used here due to false negatives + // regarding Nodelist length in IE + var length = !!obj && "length" in obj && obj.length, + type = toType( obj ); + + if ( isFunction( obj ) || isWindow( obj ) ) { + return false; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! + * Sizzle CSS Selector Engine v2.3.6 + * https://sizzlejs.com/ + * + * Copyright JS Foundation and other contributors + * Released under the MIT license + * https://js.foundation/ + * + * Date: 2021-02-16 + */ +( function( window ) { +var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + 1 * new Date(), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + nonnativeSelectorCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // Instance methods + hasOwn = ( {} ).hasOwnProperty, + arr = [], + pop = arr.pop, + pushNative = arr.push, + push = arr.push, + slice = arr.slice, + + // Use a stripped-down indexOf as it's faster than native + // https://jsperf.com/thor-indexof-vs-for/5 + indexOf = function( list, elem ) { + var i = 0, + len = list.length; + for ( ; i < len; i++ ) { + if ( list[ i ] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|" + + "ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + + // https://www.w3.org/TR/css-syntax-3/#ident-token-diagram + identifier = "(?:\\\\[\\da-fA-F]{1,6}" + whitespace + + "?|\\\\[^\\r\\n\\f]|[\\w-]|[^\0-\\x7f])+", + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace + + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + + // "Attribute values must be CSS identifiers [capture 5] + // or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + + whitespace + "*\\]", + + pseudos = ":(" + identifier + ")(?:\\((" + + + // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + + // 3. 
anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rwhitespace = new RegExp( whitespace + "+", "g" ), + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + + "*" ), + rdescend = new RegExp( whitespace + "|>" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + identifier + ")" ), + "CLASS": new RegExp( "^\\.(" + identifier + ")" ), + "TAG": new RegExp( "^(" + identifier + "|[*])" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + + whitespace + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + + whitespace + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace + + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rhtml = /HTML$/i, + rinputs = /^(?:input|select|textarea|button)$/i, + rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + + // CSS escapes + // http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\[\\da-fA-F]{1,6}" + whitespace + "?|\\\\([^\\r\\n\\f])", "g" ), + funescape = function( escape, nonHex ) { + var high = "0x" + escape.slice( 1 ) - 0x10000; + + return nonHex ? + + // Strip the backslash prefix from a non-hex escape sequence + nonHex : + + // Replace a hexadecimal escape sequence with the encoded Unicode code point + // Support: IE <=11+ + // For values outside the Basic Multilingual Plane (BMP), manually construct a + // surrogate pair + high < 0 ? 
+ String.fromCharCode( high + 0x10000 ) : + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }, + + // CSS string/identifier serialization + // https://drafts.csswg.org/cssom/#common-serializing-idioms + rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g, + fcssescape = function( ch, asCodePoint ) { + if ( asCodePoint ) { + + // U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER + if ( ch === "\0" ) { + return "\uFFFD"; + } + + // Control characters and (dependent upon position) numbers get escaped as code points + return ch.slice( 0, -1 ) + "\\" + + ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " "; + } + + // Other potentially-special ASCII characters get backslash-escaped + return "\\" + ch; + }, + + // Used for iframes + // See setDocument() + // Removing the function wrapper causes a "Permission Denied" + // error in IE + unloadHandler = function() { + setDocument(); + }, + + inDisabledFieldset = addCombinator( + function( elem ) { + return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset"; + }, + { dir: "parentNode", next: "legend" } + ); + +// Optimize for push.apply( _, NodeList ) +try { + push.apply( + ( arr = slice.call( preferredDoc.childNodes ) ), + preferredDoc.childNodes + ); + + // Support: Android<4.0 + // Detect silently failing push.apply + // eslint-disable-next-line no-unused-expressions + arr[ preferredDoc.childNodes.length ].nodeType; +} catch ( e ) { + push = { apply: arr.length ? + + // Leverage slice if possible + function( target, els ) { + pushNative.apply( target, slice.call( els ) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + + // Can't trust NodeList.length + while ( ( target[ j++ ] = els[ i++ ] ) ) {} + target.length = j - 1; + } + }; +} + +function Sizzle( selector, context, results, seed ) { + var m, i, elem, nid, match, groups, newSelector, + newContext = context && context.ownerDocument, + + // nodeType defaults to 9, since context defaults to document + nodeType = context ? 
context.nodeType : 9; + + results = results || []; + + // Return early from calls with invalid selector or context + if ( typeof selector !== "string" || !selector || + nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) { + + return results; + } + + // Try to shortcut find operations (as opposed to filters) in HTML documents + if ( !seed ) { + setDocument( context ); + context = context || document; + + if ( documentIsHTML ) { + + // If the selector is sufficiently simple, try using a "get*By*" DOM method + // (excepting DocumentFragment context, where the methods don't exist) + if ( nodeType !== 11 && ( match = rquickExpr.exec( selector ) ) ) { + + // ID selector + if ( ( m = match[ 1 ] ) ) { + + // Document context + if ( nodeType === 9 ) { + if ( ( elem = context.getElementById( m ) ) ) { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + + // Element context + } else { + + // Support: IE, Opera, Webkit + // TODO: identify versions + // getElementById can match elements by name instead of ID + if ( newContext && ( elem = newContext.getElementById( m ) ) && + contains( context, elem ) && + elem.id === m ) { + + results.push( elem ); + return results; + } + } + + // Type selector + } else if ( match[ 2 ] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Class selector + } else if ( ( m = match[ 3 ] ) && support.getElementsByClassName && + context.getElementsByClassName ) { + + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // Take advantage of querySelectorAll + if ( support.qsa && + !nonnativeSelectorCache[ selector + " " ] && + ( !rbuggyQSA || !rbuggyQSA.test( selector ) ) && + + // Support: IE 8 only + // Exclude object elements + ( nodeType !== 1 || context.nodeName.toLowerCase() !== "object" ) ) { + + newSelector = selector; + newContext = context; + + // qSA considers elements outside a scoping root when evaluating child or + // descendant combinators, which is not what we want. + // In such cases, we work around the behavior by prefixing every selector in the + // list with an ID selector referencing the scope context. + // The technique has to be used as well when a leading combinator is used + // as such selectors are not recognized by querySelectorAll. + // Thanks to Andrew Dupont for this technique. + if ( nodeType === 1 && + ( rdescend.test( selector ) || rcombinators.test( selector ) ) ) { + + // Expand context for sibling selectors + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || + context; + + // We can use :scope instead of the ID hack if the browser + // supports it & if we're not changing the context. + if ( newContext !== context || !support.scope ) { + + // Capture the context ID, setting it first if necessary + if ( ( nid = context.getAttribute( "id" ) ) ) { + nid = nid.replace( rcssescape, fcssescape ); + } else { + context.setAttribute( "id", ( nid = expando ) ); + } + } + + // Prefix every selector in the list + groups = tokenize( selector ); + i = groups.length; + while ( i-- ) { + groups[ i ] = ( nid ? 
"#" + nid : ":scope" ) + " " + + toSelector( groups[ i ] ); + } + newSelector = groups.join( "," ); + } + + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch ( qsaError ) { + nonnativeSelectorCache( selector, true ); + } finally { + if ( nid === expando ) { + context.removeAttribute( "id" ); + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); +} + +/** + * Create key-value caches of limited size + * @returns {function(string, object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ +function createCache() { + var keys = []; + + function cache( key, value ) { + + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return ( cache[ key + " " ] = value ); + } + return cache; +} + +/** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ +function markFunction( fn ) { + fn[ expando ] = true; + return fn; +} + +/** + * Support testing using an element + * @param {Function} fn Passed the created element and returns a boolean result + */ +function assert( fn ) { + var el = document.createElement( "fieldset" ); + + try { + return !!fn( el ); + } catch ( e ) { + return false; + } finally { + + // Remove from its parent by default + if ( el.parentNode ) { + el.parentNode.removeChild( el ); + } + + // release memory in IE + el = null; + } +} + +/** + * Adds the same handler for all of the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ +function addHandle( attrs, handler ) { + var arr = attrs.split( "|" ), + i = arr.length; + + while ( i-- ) { + Expr.attrHandle[ arr[ i ] ] = handler; + } +} + +/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ +function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + a.sourceIndex - b.sourceIndex; + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( ( cur = cur.nextSibling ) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 
1 : -1; +} + +/** + * Returns a function to use in pseudos for input types + * @param {String} type + */ +function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for buttons + * @param {String} type + */ +function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return ( name === "input" || name === "button" ) && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for :enabled/:disabled + * @param {Boolean} disabled true for :disabled; false for :enabled + */ +function createDisabledPseudo( disabled ) { + + // Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable + return function( elem ) { + + // Only certain elements can match :enabled or :disabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled + // https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled + if ( "form" in elem ) { + + // Check for inherited disabledness on relevant non-disabled elements: + // * listed form-associated elements in a disabled fieldset + // https://html.spec.whatwg.org/multipage/forms.html#category-listed + // https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled + // * option elements in a disabled optgroup + // https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled + // All such elements have a "form" property. + if ( elem.parentNode && elem.disabled === false ) { + + // Option elements defer to a parent optgroup if present + if ( "label" in elem ) { + if ( "label" in elem.parentNode ) { + return elem.parentNode.disabled === disabled; + } else { + return elem.disabled === disabled; + } + } + + // Support: IE 6 - 11 + // Use the isDisabled shortcut property to check for disabled fieldset ancestors + return elem.isDisabled === disabled || + + // Where there is no isDisabled, check manually + /* jshint -W018 */ + elem.isDisabled !== !disabled && + inDisabledFieldset( elem ) === disabled; + } + + return elem.disabled === disabled; + + // Try to winnow out elements that can't be disabled before trusting the disabled property. + // Some victims get caught in our net (label, legend, menu, track), but it shouldn't + // even exist on them, let alone have a boolean value. 
+ } else if ( "label" in elem ) { + return elem.disabled === disabled; + } + + // Remaining elements are neither :enabled nor :disabled + return false; + }; +} + +/** + * Returns a function to use in pseudos for positionals + * @param {Function} fn + */ +function createPositionalPseudo( fn ) { + return markFunction( function( argument ) { + argument = +argument; + return markFunction( function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ ( j = matchIndexes[ i ] ) ] ) { + seed[ j ] = !( matches[ j ] = seed[ j ] ); + } + } + } ); + } ); +} + +/** + * Checks a node for validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ +function testContext( context ) { + return context && typeof context.getElementsByTagName !== "undefined" && context; +} + +// Expose support vars for convenience +support = Sizzle.support = {}; + +/** + * Detects XML nodes + * @param {Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node + */ +isXML = Sizzle.isXML = function( elem ) { + var namespace = elem && elem.namespaceURI, + docElem = elem && ( elem.ownerDocument || elem ).documentElement; + + // Support: IE <=8 + // Assume HTML when documentElement doesn't yet exist, such as inside loading iframes + // https://bugs.jquery.com/ticket/4833 + return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" ); +}; + +/** + * Sets document-related variables once based on the current document + * @param {Element|Object} [doc] An element or document object to use to set the document + * @returns {Object} Returns the current document + */ +setDocument = Sizzle.setDocument = function( node ) { + var hasCompare, subWindow, + doc = node ? node.ownerDocument || node : preferredDoc; + + // Return early if doc is invalid or already selected + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( doc == document || doc.nodeType !== 9 || !doc.documentElement ) { + return document; + } + + // Update global variables + document = doc; + docElem = document.documentElement; + documentIsHTML = !isXML( document ); + + // Support: IE 9 - 11+, Edge 12 - 18+ + // Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936) + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( preferredDoc != document && + ( subWindow = document.defaultView ) && subWindow.top !== subWindow ) { + + // Support: IE 11, Edge + if ( subWindow.addEventListener ) { + subWindow.addEventListener( "unload", unloadHandler, false ); + + // Support: IE 9 - 10 only + } else if ( subWindow.attachEvent ) { + subWindow.attachEvent( "onunload", unloadHandler ); + } + } + + // Support: IE 8 - 11+, Edge 12 - 18+, Chrome <=16 - 25 only, Firefox <=3.6 - 31 only, + // Safari 4 - 5 only, Opera <=11.6 - 12.x only + // IE/Edge & older browsers don't support the :scope pseudo-class. + // Support: Safari 6.0 only + // Safari 6.0 supports :scope but it's an alias of :root there. 
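// [Editor's note - illustrative sketch, not part of the Sizzle source.] The
// assert() helper defined earlier runs a feature test inside try/catch
// against a disposable fieldset element, which keeps one-off browser probes
// like the support.scope check below compact. The same pattern, with a
// hypothetical dataset probe standing in for a real Sizzle test:
//
//     var supportsDataset = assert( function( el ) {
//         el.dataset.probe = "1";
//         return el.getAttribute( "data-probe" ) === "1";
//     } );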
+ support.scope = assert( function( el ) { + docElem.appendChild( el ).appendChild( document.createElement( "div" ) ); + return typeof el.querySelectorAll !== "undefined" && + !el.querySelectorAll( ":scope fieldset div" ).length; + } ); + + /* Attributes + ---------------------------------------------------------------------- */ + + // Support: IE<8 + // Verify that getAttribute really returns attributes and not properties + // (excepting IE8 booleans) + support.attributes = assert( function( el ) { + el.className = "i"; + return !el.getAttribute( "className" ); + } ); + + /* getElement(s)By* + ---------------------------------------------------------------------- */ + + // Check if getElementsByTagName("*") returns only elements + support.getElementsByTagName = assert( function( el ) { + el.appendChild( document.createComment( "" ) ); + return !el.getElementsByTagName( "*" ).length; + } ); + + // Support: IE<9 + support.getElementsByClassName = rnative.test( document.getElementsByClassName ); + + // Support: IE<10 + // Check if getElementById returns elements by name + // The broken getElementById methods don't pick up programmatically-set names, + // so use a roundabout getElementsByName test + support.getById = assert( function( el ) { + docElem.appendChild( el ).id = expando; + return !document.getElementsByName || !document.getElementsByName( expando ).length; + } ); + + // ID filter and find + if ( support.getById ) { + Expr.filter[ "ID" ] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + return elem.getAttribute( "id" ) === attrId; + }; + }; + Expr.find[ "ID" ] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var elem = context.getElementById( id ); + return elem ? [ elem ] : []; + } + }; + } else { + Expr.filter[ "ID" ] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== "undefined" && + elem.getAttributeNode( "id" ); + return node && node.value === attrId; + }; + }; + + // Support: IE 6 - 7 only + // getElementById is not reliable as a find shortcut + Expr.find[ "ID" ] = function( id, context ) { + if ( typeof context.getElementById !== "undefined" && documentIsHTML ) { + var node, i, elems, + elem = context.getElementById( id ); + + if ( elem ) { + + // Verify the id attribute + node = elem.getAttributeNode( "id" ); + if ( node && node.value === id ) { + return [ elem ]; + } + + // Fall back on getElementsByName + elems = context.getElementsByName( id ); + i = 0; + while ( ( elem = elems[ i++ ] ) ) { + node = elem.getAttributeNode( "id" ); + if ( node && node.value === id ) { + return [ elem ]; + } + } + } + + return []; + } + }; + } + + // Tag + Expr.find[ "TAG" ] = support.getElementsByTagName ? 
+ function( tag, context ) {
+ if ( typeof context.getElementsByTagName !== "undefined" ) {
+ return context.getElementsByTagName( tag );
+
+ // DocumentFragment nodes don't have gEBTN
+ } else if ( support.qsa ) {
+ return context.querySelectorAll( tag );
+ }
+ } :
+
+ function( tag, context ) {
+ var elem,
+ tmp = [],
+ i = 0,
+
+ // By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too
+ results = context.getElementsByTagName( tag );
+
+ // Filter out possible comments
+ if ( tag === "*" ) {
+ while ( ( elem = results[ i++ ] ) ) {
+ if ( elem.nodeType === 1 ) {
+ tmp.push( elem );
+ }
+ }
+
+ return tmp;
+ }
+ return results;
+ };
+
+ // Class
+ Expr.find[ "CLASS" ] = support.getElementsByClassName && function( className, context ) {
+ if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) {
+ return context.getElementsByClassName( className );
+ }
+ };
+
+ /* QSA/matchesSelector
+ ---------------------------------------------------------------------- */
+
+ // QSA and matchesSelector support
+
+ // matchesSelector(:active) reports false when true (IE9/Opera 11.5)
+ rbuggyMatches = [];
+
+ // qSa(:focus) reports false when true (Chrome 21)
+ // We allow this because of a bug in IE8/9 that throws an error
+ // whenever `document.activeElement` is accessed on an iframe
+ // So, we allow :focus to pass through QSA all the time to avoid the IE error
+ // See https://bugs.jquery.com/ticket/13378
+ rbuggyQSA = [];
+
+ if ( ( support.qsa = rnative.test( document.querySelectorAll ) ) ) {
+
+ // Build QSA regex
+ // Regex strategy adopted from Diego Perini
+ assert( function( el ) {
+
+ var input;
+
+ // Select is set to empty string on purpose
+ // This is to test IE's treatment of not explicitly
+ // setting a boolean content attribute,
+ // since its presence should be enough
+ // https://bugs.jquery.com/ticket/12359
+ docElem.appendChild( el ).innerHTML = "<a id='" + expando + "'></a>" +
+ "<select id='" + expando + "-\r\\' msallowcapture=''>" +
+ "<option selected=''></option></select>";
+
+ // Support: IE8, Opera 11-12.16
+ // Nothing should be selected when empty strings follow ^= or $= or *=
+ // The test attribute must be unknown in Opera but "safe" for WinRT
+ // https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section
+ if ( el.querySelectorAll( "[msallowcapture^='']" ).length ) {
+ rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" );
+ }
+
+ // Support: IE8
+ // Boolean attributes and "value" are not treated correctly
+ if ( !el.querySelectorAll( "[selected]" ).length ) {
+ rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" );
+ }
+
+ // Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+
+ if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) {
+ rbuggyQSA.push( "~=" );
+ }
+
+ // Support: IE 11+, Edge 15 - 18+
+ // IE 11/Edge don't find elements on a `[name='']` query in some cases.
+ // Adding a temporary attribute to the document before the selection works
+ // around the issue.
+ // Interestingly, IE 10 & older don't seem to have the issue.
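// [Editor's note - illustrative sketch, not part of the Sizzle source.] Each
// string pushed onto rbuggyQSA above is a regular-expression fragment; further
// down they are joined into a single RegExp and tested against incoming
// selectors, so any selector matching a known-buggy pattern skips native
// querySelectorAll and falls back to Sizzle's own matchers:
//
//     var buggy = new RegExp( [ "~=", ":checked" ].join( "|" ) );
//     buggy.test( "input:checked" ); // true -> use Sizzle's matcher
//     buggy.test( "div > p" );       // false -> native fast path is safe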
+ input = document.createElement( "input" );
+ input.setAttribute( "name", "" );
+ el.appendChild( input );
+ if ( !el.querySelectorAll( "[name='']" ).length ) {
+ rbuggyQSA.push( "\\[" + whitespace + "*name" + whitespace + "*=" +
+ whitespace + "*(?:''|\"\")" );
+ }
+
+ // Webkit/Opera - :checked should return selected option elements
+ // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
+ // IE8 throws error here and will not see later tests
+ if ( !el.querySelectorAll( ":checked" ).length ) {
+ rbuggyQSA.push( ":checked" );
+ }
+
+ // Support: Safari 8+, iOS 8+
+ // https://bugs.webkit.org/show_bug.cgi?id=136851
+ // In-page `selector#id sibling-combinator selector` fails
+ if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) {
+ rbuggyQSA.push( ".#.+[+~]" );
+ }
+
+ // Support: Firefox <=3.6 - 5 only
+ // Old Firefox doesn't throw on a badly-escaped identifier.
+ el.querySelectorAll( "\\\f" );
+ rbuggyQSA.push( "[\\r\\n\\f]" );
+ } );
+
+ assert( function( el ) {
+ el.innerHTML = "<a href='' disabled='disabled'></a>" +
+ "<select disabled='disabled'><option/></select>";
+
+ // Support: Windows 8 Native Apps
+ // The type and name attributes are restricted during .innerHTML assignment
+ var input = document.createElement( "input" );
+ input.setAttribute( "type", "hidden" );
+ el.appendChild( input ).setAttribute( "name", "D" );
+
+ // Support: IE8
+ // Enforce case-sensitivity of name attribute
+ if ( el.querySelectorAll( "[name=d]" ).length ) {
+ rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" );
+ }
+
+ // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled)
+ // IE8 throws error here and will not see later tests
+ if ( el.querySelectorAll( ":enabled" ).length !== 2 ) {
+ rbuggyQSA.push( ":enabled", ":disabled" );
+ }
+
+ // Support: IE9-11+
+ // IE's :disabled selector does not pick up the children of disabled fieldsets
+ docElem.appendChild( el ).disabled = true;
+ if ( el.querySelectorAll( ":disabled" ).length !== 2 ) {
+ rbuggyQSA.push( ":enabled", ":disabled" );
+ }
+
+ // Support: Opera 10 - 11 only
+ // Opera 10-11 does not throw on post-comma invalid pseudos
+ el.querySelectorAll( "*,:x" );
+ rbuggyQSA.push( ",.*:" );
+ } );
+ }
+
+ if ( ( support.matchesSelector = rnative.test( ( matches = docElem.matches ||
+ docElem.webkitMatchesSelector ||
+ docElem.mozMatchesSelector ||
+ docElem.oMatchesSelector ||
+ docElem.msMatchesSelector ) ) ) ) {
+
+ assert( function( el ) {
+
+ // Check to see if it's possible to do matchesSelector
+ // on a disconnected node (IE 9)
+ support.disconnectedMatch = matches.call( el, "*" );
+
+ // This should fail with an exception
+ // Gecko does not error, returns false instead
+ matches.call( el, "[s!='']:x" );
+ rbuggyMatches.push( "!=", pseudos );
+ } );
+ }
+
+ rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join( "|" ) );
+ rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join( "|" ) );
+
+ /* Contains
+ ---------------------------------------------------------------------- */
+ hasCompare = rnative.test( docElem.compareDocumentPosition );
+
+ // Element contains another
+ // Purposefully self-exclusive
+ // As in, an element does not contain itself
+ contains = hasCompare || rnative.test( docElem.contains ) ?
+ function( a, b ) {
+ var adown = a.nodeType === 9 ? a.documentElement : a,
+ bup = b && b.parentNode;
+ return a === bup || !!( bup && bup.nodeType === 1 && (
+ adown.contains ?
+ adown.contains( bup ) : + a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 + ) ); + } : + function( a, b ) { + if ( b ) { + while ( ( b = b.parentNode ) ) { + if ( b === a ) { + return true; + } + } + } + return false; + }; + + /* Sorting + ---------------------------------------------------------------------- */ + + // Document order sorting + sortOrder = hasCompare ? + function( a, b ) { + + // Flag for duplicate removal + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + // Sort on method existence if only one input has compareDocumentPosition + var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; + if ( compare ) { + return compare; + } + + // Calculate position if both inputs belong to the same document + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + compare = ( a.ownerDocument || a ) == ( b.ownerDocument || b ) ? + a.compareDocumentPosition( b ) : + + // Otherwise we know they are disconnected + 1; + + // Disconnected nodes + if ( compare & 1 || + ( !support.sortDetached && b.compareDocumentPosition( a ) === compare ) ) { + + // Choose the first element that is related to our preferred document + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( a == document || a.ownerDocument == preferredDoc && + contains( preferredDoc, a ) ) { + return -1; + } + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( b == document || b.ownerDocument == preferredDoc && + contains( preferredDoc, b ) ) { + return 1; + } + + // Maintain original order + return sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + } + + return compare & 4 ? -1 : 1; + } : + function( a, b ) { + + // Exit early if the nodes are identical + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + var cur, + i = 0, + aup = a.parentNode, + bup = b.parentNode, + ap = [ a ], + bp = [ b ]; + + // Parentless nodes are either documents or disconnected + if ( !aup || !bup ) { + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + /* eslint-disable eqeqeq */ + return a == document ? -1 : + b == document ? 1 : + /* eslint-enable eqeqeq */ + aup ? -1 : + bup ? 1 : + sortInput ? + ( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) : + 0; + + // If the nodes are siblings, we can do a quick check + } else if ( aup === bup ) { + return siblingCheck( a, b ); + } + + // Otherwise we need full lists of their ancestors for comparison + cur = a; + while ( ( cur = cur.parentNode ) ) { + ap.unshift( cur ); + } + cur = b; + while ( ( cur = cur.parentNode ) ) { + bp.unshift( cur ); + } + + // Walk down the tree looking for a discrepancy + while ( ap[ i ] === bp[ i ] ) { + i++; + } + + return i ? + + // Do a sibling check if the nodes have a common ancestor + siblingCheck( ap[ i ], bp[ i ] ) : + + // Otherwise nodes in our document sort first + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. 
+ /* eslint-disable eqeqeq */ + ap[ i ] == preferredDoc ? -1 : + bp[ i ] == preferredDoc ? 1 : + /* eslint-enable eqeqeq */ + 0; + }; + + return document; +}; + +Sizzle.matches = function( expr, elements ) { + return Sizzle( expr, null, null, elements ); +}; + +Sizzle.matchesSelector = function( elem, expr ) { + setDocument( elem ); + + if ( support.matchesSelector && documentIsHTML && + !nonnativeSelectorCache[ expr + " " ] && + ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && + ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { + + try { + var ret = matches.call( elem, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || support.disconnectedMatch || + + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { + return ret; + } + } catch ( e ) { + nonnativeSelectorCache( expr, true ); + } + } + + return Sizzle( expr, document, null, [ elem ] ).length > 0; +}; + +Sizzle.contains = function( context, elem ) { + + // Set document vars if needed + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( ( context.ownerDocument || context ) != document ) { + setDocument( context ); + } + return contains( context, elem ); +}; + +Sizzle.attr = function( elem, name ) { + + // Set document vars if needed + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( ( elem.ownerDocument || elem ) != document ) { + setDocument( elem ); + } + + var fn = Expr.attrHandle[ name.toLowerCase() ], + + // Don't get fooled by Object.prototype properties (jQuery #13807) + val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? + fn( elem, name, !documentIsHTML ) : + undefined; + + return val !== undefined ? + val : + support.attributes || !documentIsHTML ? + elem.getAttribute( name ) : + ( val = elem.getAttributeNode( name ) ) && val.specified ? 
+ val.value : + null; +}; + +Sizzle.escape = function( sel ) { + return ( sel + "" ).replace( rcssescape, fcssescape ); +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +/** + * Document sorting and removing duplicates + * @param {ArrayLike} results + */ +Sizzle.uniqueSort = function( results ) { + var elem, + duplicates = [], + j = 0, + i = 0; + + // Unless we *know* we can detect duplicates, assume their presence + hasDuplicate = !support.detectDuplicates; + sortInput = !support.sortStable && results.slice( 0 ); + results.sort( sortOrder ); + + if ( hasDuplicate ) { + while ( ( elem = results[ i++ ] ) ) { + if ( elem === results[ i ] ) { + j = duplicates.push( i ); + } + } + while ( j-- ) { + results.splice( duplicates[ j ], 1 ); + } + } + + // Clear input after sorting to release objects + // See https://github.com/jquery/sizzle/pull/225 + sortInput = null; + + return results; +}; + +/** + * Utility function for retrieving the text value of an array of DOM nodes + * @param {Array|Element} elem + */ +getText = Sizzle.getText = function( elem ) { + var node, + ret = "", + i = 0, + nodeType = elem.nodeType; + + if ( !nodeType ) { + + // If no nodeType, this is expected to be an array + while ( ( node = elem[ i++ ] ) ) { + + // Do not traverse comment nodes + ret += getText( node ); + } + } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + + // Use textContent for elements + // innerText usage removed for consistency of new lines (jQuery #11153) + if ( typeof elem.textContent === "string" ) { + return elem.textContent; + } else { + + // Traverse its children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + + // Do not include comment or processing instruction nodes + + return ret; +}; + +Expr = Sizzle.selectors = { + + // Can be adjusted by the user + cacheLength: 50, + + createPseudo: markFunction, + + match: matchExpr, + + attrHandle: {}, + + find: {}, + + relative: { + ">": { dir: "parentNode", first: true }, + " ": { dir: "parentNode" }, + "+": { dir: "previousSibling", first: true }, + "~": { dir: "previousSibling" } + }, + + preFilter: { + "ATTR": function( match ) { + match[ 1 ] = match[ 1 ].replace( runescape, funescape ); + + // Move the given value to match[3] whether quoted or unquoted + match[ 3 ] = ( match[ 3 ] || match[ 4 ] || + match[ 5 ] || "" ).replace( runescape, funescape ); + + if ( match[ 2 ] === "~=" ) { + match[ 3 ] = " " + match[ 3 ] + " "; + } + + return match.slice( 0, 4 ); + }, + + "CHILD": function( match ) { + + /* matches from matchExpr["CHILD"] + 1 type (only|nth|...) + 2 what (child|of-type) + 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) + 4 xn-component of xn+y argument ([+-]?\d*n|) + 5 sign of xn-component + 6 x of xn-component + 7 sign of y-component + 8 y of y-component + */ + match[ 1 ] = match[ 1 ].toLowerCase(); + + if ( match[ 1 ].slice( 0, 3 ) === "nth" ) { + + // nth-* requires argument + if ( !match[ 3 ] ) { + Sizzle.error( match[ 0 ] ); + } + + // numeric x and y parameters for Expr.filter.CHILD + // remember that false/true cast respectively to 0/1 + match[ 4 ] = +( match[ 4 ] ? 
+ match[ 5 ] + ( match[ 6 ] || 1 ) : + 2 * ( match[ 3 ] === "even" || match[ 3 ] === "odd" ) ); + match[ 5 ] = +( ( match[ 7 ] + match[ 8 ] ) || match[ 3 ] === "odd" ); + + // other types prohibit arguments + } else if ( match[ 3 ] ) { + Sizzle.error( match[ 0 ] ); + } + + return match; + }, + + "PSEUDO": function( match ) { + var excess, + unquoted = !match[ 6 ] && match[ 2 ]; + + if ( matchExpr[ "CHILD" ].test( match[ 0 ] ) ) { + return null; + } + + // Accept quoted arguments as-is + if ( match[ 3 ] ) { + match[ 2 ] = match[ 4 ] || match[ 5 ] || ""; + + // Strip excess characters from unquoted arguments + } else if ( unquoted && rpseudo.test( unquoted ) && + + // Get excess from tokenize (recursively) + ( excess = tokenize( unquoted, true ) ) && + + // advance to the next closing parenthesis + ( excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length ) ) { + + // excess is a negative index + match[ 0 ] = match[ 0 ].slice( 0, excess ); + match[ 2 ] = unquoted.slice( 0, excess ); + } + + // Return only captures needed by the pseudo filter method (type and argument) + return match.slice( 0, 3 ); + } + }, + + filter: { + + "TAG": function( nodeNameSelector ) { + var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); + return nodeNameSelector === "*" ? + function() { + return true; + } : + function( elem ) { + return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; + }; + }, + + "CLASS": function( className ) { + var pattern = classCache[ className + " " ]; + + return pattern || + ( pattern = new RegExp( "(^|" + whitespace + + ")" + className + "(" + whitespace + "|$)" ) ) && classCache( + className, function( elem ) { + return pattern.test( + typeof elem.className === "string" && elem.className || + typeof elem.getAttribute !== "undefined" && + elem.getAttribute( "class" ) || + "" + ); + } ); + }, + + "ATTR": function( name, operator, check ) { + return function( elem ) { + var result = Sizzle.attr( elem, name ); + + if ( result == null ) { + return operator === "!="; + } + if ( !operator ) { + return true; + } + + result += ""; + + /* eslint-disable max-len */ + + return operator === "=" ? result === check : + operator === "!=" ? result !== check : + operator === "^=" ? check && result.indexOf( check ) === 0 : + operator === "*=" ? check && result.indexOf( check ) > -1 : + operator === "$=" ? check && result.slice( -check.length ) === check : + operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 : + operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : + false; + /* eslint-enable max-len */ + + }; + }, + + "CHILD": function( type, what, _argument, first, last ) { + var simple = type.slice( 0, 3 ) !== "nth", + forward = type.slice( -4 ) !== "last", + ofType = what === "of-type"; + + return first === 1 && last === 0 ? + + // Shortcut for :nth-*(n) + function( elem ) { + return !!elem.parentNode; + } : + + function( elem, _context, xml ) { + var cache, uniqueCache, outerCache, node, nodeIndex, start, + dir = simple !== forward ? "nextSibling" : "previousSibling", + parent = elem.parentNode, + name = ofType && elem.nodeName.toLowerCase(), + useCache = !xml && !ofType, + diff = false; + + if ( parent ) { + + // :(first|last|only)-(child|of-type) + if ( simple ) { + while ( dir ) { + node = elem; + while ( ( node = node[ dir ] ) ) { + if ( ofType ? 
+ node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) { + + return false; + } + } + + // Reverse direction for :only-* (if we haven't yet done so) + start = dir = type === "only" && !start && "nextSibling"; + } + return true; + } + + start = [ forward ? parent.firstChild : parent.lastChild ]; + + // non-xml :nth-child(...) stores cache data on `parent` + if ( forward && useCache ) { + + // Seek `elem` from a previously-cached index + + // ...in a gzip-friendly way + node = parent; + outerCache = node[ expando ] || ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex && cache[ 2 ]; + node = nodeIndex && parent.childNodes[ nodeIndex ]; + + while ( ( node = ++nodeIndex && node && node[ dir ] || + + // Fallback to seeking `elem` from the start + ( diff = nodeIndex = 0 ) || start.pop() ) ) { + + // When found, cache indexes on `parent` and break + if ( node.nodeType === 1 && ++diff && node === elem ) { + uniqueCache[ type ] = [ dirruns, nodeIndex, diff ]; + break; + } + } + + } else { + + // Use previously-cached element index if available + if ( useCache ) { + + // ...in a gzip-friendly way + node = elem; + outerCache = node[ expando ] || ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + cache = uniqueCache[ type ] || []; + nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ]; + diff = nodeIndex; + } + + // xml :nth-child(...) + // or :nth-last-child(...) or :nth(-last)?-of-type(...) + if ( diff === false ) { + + // Use the same loop as above to seek `elem` from the start + while ( ( node = ++nodeIndex && node && node[ dir ] || + ( diff = nodeIndex = 0 ) || start.pop() ) ) { + + if ( ( ofType ? + node.nodeName.toLowerCase() === name : + node.nodeType === 1 ) && + ++diff ) { + + // Cache the index of each encountered element + if ( useCache ) { + outerCache = node[ expando ] || + ( node[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ node.uniqueID ] || + ( outerCache[ node.uniqueID ] = {} ); + + uniqueCache[ type ] = [ dirruns, diff ]; + } + + if ( node === elem ) { + break; + } + } + } + } + } + + // Incorporate the offset, then check against cycle size + diff -= last; + return diff === first || ( diff % first === 0 && diff / first >= 0 ); + } + }; + }, + + "PSEUDO": function( pseudo, argument ) { + + // pseudo-class names are case-insensitive + // http://www.w3.org/TR/selectors/#pseudo-classes + // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters + // Remember that setFilters inherits from pseudos + var args, + fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || + Sizzle.error( "unsupported pseudo: " + pseudo ); + + // The user may use createPseudo to indicate that + // arguments are needed to create the filter function + // just as Sizzle does + if ( fn[ expando ] ) { + return fn( argument ); + } + + // But maintain support for old signatures + if ( fn.length > 1 ) { + args = [ pseudo, pseudo, "", argument ]; + return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? 
+ markFunction( function( seed, matches ) { + var idx, + matched = fn( seed, argument ), + i = matched.length; + while ( i-- ) { + idx = indexOf( seed, matched[ i ] ); + seed[ idx ] = !( matches[ idx ] = matched[ i ] ); + } + } ) : + function( elem ) { + return fn( elem, 0, args ); + }; + } + + return fn; + } + }, + + pseudos: { + + // Potentially complex pseudos + "not": markFunction( function( selector ) { + + // Trim the selector passed to compile + // to avoid treating leading and trailing + // spaces as combinators + var input = [], + results = [], + matcher = compile( selector.replace( rtrim, "$1" ) ); + + return matcher[ expando ] ? + markFunction( function( seed, matches, _context, xml ) { + var elem, + unmatched = matcher( seed, null, xml, [] ), + i = seed.length; + + // Match elements unmatched by `matcher` + while ( i-- ) { + if ( ( elem = unmatched[ i ] ) ) { + seed[ i ] = !( matches[ i ] = elem ); + } + } + } ) : + function( elem, _context, xml ) { + input[ 0 ] = elem; + matcher( input, null, xml, results ); + + // Don't keep the element (issue #299) + input[ 0 ] = null; + return !results.pop(); + }; + } ), + + "has": markFunction( function( selector ) { + return function( elem ) { + return Sizzle( selector, elem ).length > 0; + }; + } ), + + "contains": markFunction( function( text ) { + text = text.replace( runescape, funescape ); + return function( elem ) { + return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1; + }; + } ), + + // "Whether an element is represented by a :lang() selector + // is based solely on the element's language value + // being equal to the identifier C, + // or beginning with the identifier C immediately followed by "-". + // The matching of C against the element's language value is performed case-insensitively. + // The identifier C does not have to be a valid language name." + // http://www.w3.org/TR/selectors/#lang-pseudo + "lang": markFunction( function( lang ) { + + // lang value must be a valid identifier + if ( !ridentifier.test( lang || "" ) ) { + Sizzle.error( "unsupported lang: " + lang ); + } + lang = lang.replace( runescape, funescape ).toLowerCase(); + return function( elem ) { + var elemLang; + do { + if ( ( elemLang = documentIsHTML ? 
+ elem.lang : + elem.getAttribute( "xml:lang" ) || elem.getAttribute( "lang" ) ) ) { + + elemLang = elemLang.toLowerCase(); + return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; + } + } while ( ( elem = elem.parentNode ) && elem.nodeType === 1 ); + return false; + }; + } ), + + // Miscellaneous + "target": function( elem ) { + var hash = window.location && window.location.hash; + return hash && hash.slice( 1 ) === elem.id; + }, + + "root": function( elem ) { + return elem === docElem; + }, + + "focus": function( elem ) { + return elem === document.activeElement && + ( !document.hasFocus || document.hasFocus() ) && + !!( elem.type || elem.href || ~elem.tabIndex ); + }, + + // Boolean properties + "enabled": createDisabledPseudo( false ), + "disabled": createDisabledPseudo( true ), + + "checked": function( elem ) { + + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return ( nodeName === "input" && !!elem.checked ) || + ( nodeName === "option" && !!elem.selected ); + }, + + "selected": function( elem ) { + + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + // eslint-disable-next-line no-unused-expressions + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + // Contents + "empty": function( elem ) { + + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but not by others (comment: 8; processing instruction: 7; etc.) + // nodeType < 6 works because attributes (2) do not appear as children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + if ( elem.nodeType < 6 ) { + return false; + } + } + return true; + }, + + "parent": function( elem ) { + return !Expr.pseudos[ "empty" ]( elem ); + }, + + // Element/input types + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === "button" || name === "button"; + }, + + "text": function( elem ) { + var attr; + return elem.nodeName.toLowerCase() === "input" && + elem.type === "text" && + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( ( attr = elem.getAttribute( "type" ) ) == null || + attr.toLowerCase() === "text" ); + }, + + // Position-in-collection + "first": createPositionalPseudo( function() { + return [ 0 ]; + } ), + + "last": createPositionalPseudo( function( _matchIndexes, length ) { + return [ length - 1 ]; + } ), + + "eq": createPositionalPseudo( function( _matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + } ), + + "even": createPositionalPseudo( function( matchIndexes, length ) { + var i = 0; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "odd": createPositionalPseudo( function( matchIndexes, length ) { + var i = 1; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "lt": createPositionalPseudo( function( matchIndexes, length, argument ) { + var i = argument < 0 ? + argument + length : + argument > length ? 
+ length : + argument; + for ( ; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ), + + "gt": createPositionalPseudo( function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + } ) + } +}; + +Expr.pseudos[ "nth" ] = Expr.pseudos[ "eq" ]; + +// Add button/input type pseudos +for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { + Expr.pseudos[ i ] = createInputPseudo( i ); +} +for ( i in { submit: true, reset: true } ) { + Expr.pseudos[ i ] = createButtonPseudo( i ); +} + +// Easy API for creating new setFilters +function setFilters() {} +setFilters.prototype = Expr.filters = Expr.pseudos; +Expr.setFilters = new setFilters(); + +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ selector + " " ]; + + if ( cached ) { + return parseOnly ? 0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || ( match = rcomma.exec( soFar ) ) ) { + if ( match ) { + + // Don't consume trailing commas as valid + soFar = soFar.slice( match[ 0 ].length ) || soFar; + } + groups.push( ( tokens = [] ) ); + } + + matched = false; + + // Combinators + if ( ( match = rcombinators.exec( soFar ) ) ) { + matched = match.shift(); + tokens.push( { + value: matched, + + // Cast descendant combinators to space + type: match[ 0 ].replace( rtrim, " " ) + } ); + soFar = soFar.slice( matched.length ); + } + + // Filters + for ( type in Expr.filter ) { + if ( ( match = matchExpr[ type ].exec( soFar ) ) && ( !preFilters[ type ] || + ( match = preFilters[ type ]( match ) ) ) ) { + matched = match.shift(); + tokens.push( { + value: matched, + type: type, + matches: match + } ); + soFar = soFar.slice( matched.length ); + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? + Sizzle.error( selector ) : + + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +}; + +function toSelector( tokens ) { + var i = 0, + len = tokens.length, + selector = ""; + for ( ; i < len; i++ ) { + selector += tokens[ i ].value; + } + return selector; +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + skip = combinator.next, + key = skip || dir, + checkNonElements = base && key === "parentNode", + doneName = done++; + + return combinator.first ? 
+ + // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + return matcher( elem, context, xml ); + } + } + return false; + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + var oldCache, uniqueCache, outerCache, + newCache = [ dirruns, doneName ]; + + // We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching + if ( xml ) { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + if ( matcher( elem, context, xml ) ) { + return true; + } + } + } + } else { + while ( ( elem = elem[ dir ] ) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + outerCache = elem[ expando ] || ( elem[ expando ] = {} ); + + // Support: IE <9 only + // Defend against cloned attroperties (jQuery gh-1709) + uniqueCache = outerCache[ elem.uniqueID ] || + ( outerCache[ elem.uniqueID ] = {} ); + + if ( skip && skip === elem.nodeName.toLowerCase() ) { + elem = elem[ dir ] || elem; + } else if ( ( oldCache = uniqueCache[ key ] ) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return ( newCache[ 2 ] = oldCache[ 2 ] ); + } else { + + // Reuse newcache so results back-propagate to previous elements + uniqueCache[ key ] = newCache; + + // A match means we're done; a fail means we have to keep checking + if ( ( newCache[ 2 ] = matcher( elem, context, xml ) ) ) { + return true; + } + } + } + } + } + return false; + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? + function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[ i ]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[ 0 ]; +} + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[ i ], results ); + } + return results; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( ( elem = unmatched[ i ] ) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction( function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( + selector || "*", + context.nodeType ? [ context ] : context, + [] + ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? 
+ + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( ( elem = temp[ i ] ) ) { + matcherOut[ postMap[ i ] ] = !( matcherIn[ postMap[ i ] ] = elem ); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( ( elem = matcherOut[ i ] ) ) { + + // Restore matcherIn since elem is not yet a final match + temp.push( ( matcherIn[ i ] = elem ) ); + } + } + postFinder( null, ( matcherOut = [] ), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( ( elem = matcherOut[ i ] ) && + ( temp = postFinder ? indexOf( seed, elem ) : preMap[ i ] ) > -1 ) { + + seed[ temp ] = !( results[ temp ] = elem ); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? + matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + } ); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[ 0 ].type ], + implicitRelative = leadingRelative || Expr.relative[ " " ], + i = leadingRelative ? 1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + ( checkContext = context ).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + + // Avoid hanging onto element (issue #299) + checkContext = null; + return ret; + } ]; + + for ( ; i < len; i++ ) { + if ( ( matcher = Expr.relative[ tokens[ i ].type ] ) ) { + matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ]; + } else { + matcher = Expr.filter[ tokens[ i ].type ].apply( null, tokens[ i ].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[ j ].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && toSelector( + + // If the preceding token was a descendant combinator, insert an implicit any-element `*` + tokens + .slice( 0, i - 1 ) + .concat( { value: tokens[ i - 2 ].type === " " ? 
"*" : "" } ) + ).replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( ( tokens = tokens.slice( j ) ) ), + j < len && toSelector( tokens ) + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, outermost ) { + var elem, j, matcher, + matchedCount = 0, + i = "0", + unmatched = seed && [], + setMatched = [], + contextBackup = outermostContext, + + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find[ "TAG" ]( "*", outermost ), + + // Use integer dirruns iff this is the outermost matcher + dirrunsUnique = ( dirruns += contextBackup == null ? 1 : Math.random() || 0.1 ), + len = elems.length; + + if ( outermost ) { + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + outermostContext = context == document || context || outermost; + } + + // Add elements passing elementMatchers directly to results + // Support: IE<9, Safari + // Tolerate NodeList properties (IE: "length"; Safari: ) matching elements by id + for ( ; i !== len && ( elem = elems[ i ] ) != null; i++ ) { + if ( byElement && elem ) { + j = 0; + + // Support: IE 11+, Edge 17 - 18+ + // IE/Edge sometimes throw a "Permission denied" error when strict-comparing + // two documents; shallow comparisons work. + // eslint-disable-next-line eqeqeq + if ( !context && elem.ownerDocument != document ) { + setDocument( elem ); + xml = !documentIsHTML; + } + while ( ( matcher = elementMatchers[ j++ ] ) ) { + if ( matcher( elem, context || document, xml ) ) { + results.push( elem ); + break; + } + } + if ( outermost ) { + dirruns = dirrunsUnique; + } + } + + // Track unmatched elements for set filters + if ( bySet ) { + + // They will have gone through all possible matchers + if ( ( elem = !matcher && elem ) ) { + matchedCount--; + } + + // Lengthen the array for every element, matched or not + if ( seed ) { + unmatched.push( elem ); + } + } + } + + // `i` is now the count of elements visited above, and adding it to `matchedCount` + // makes the latter nonnegative. + matchedCount += i; + + // Apply set filters to unmatched elements + // NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount` + // equals `i`), unless we didn't visit _any_ elements in the above loop because we have + // no element matchers and no seed. + // Incrementing an initially-string "0" `i` allows `i` to remain a string only in that + // case, which will result in a "00" `matchedCount` that differs from `i` but is also + // numerically zero. 
+ if ( bySet && i !== matchedCount ) { + j = 0; + while ( ( matcher = setMatchers[ j++ ] ) ) { + matcher( unmatched, setMatched, context, xml ); + } + + if ( seed ) { + + // Reintegrate element matches to eliminate the need for sorting + if ( matchedCount > 0 ) { + while ( i-- ) { + if ( !( unmatched[ i ] || setMatched[ i ] ) ) { + setMatched[ i ] = pop.call( results ); + } + } + } + + // Discard index placeholder values to get only actual matches + setMatched = condense( setMatched ); + } + + // Add matches to results + push.apply( results, setMatched ); + + // Seedless set matches succeeding multiple successful matchers stipulate sorting + if ( outermost && !seed && setMatched.length > 0 && + ( matchedCount + setMatchers.length ) > 1 ) { + + Sizzle.uniqueSort( results ); + } + } + + // Override manipulation of globals by nested matchers + if ( outermost ) { + dirruns = dirrunsUnique; + outermostContext = contextBackup; + } + + return unmatched; + }; + + return bySet ? + markFunction( superMatcher ) : + superMatcher; +} + +compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { + var i, + setMatchers = [], + elementMatchers = [], + cached = compilerCache[ selector + " " ]; + + if ( !cached ) { + + // Generate a function of recursive functions that can be used to check each element + if ( !match ) { + match = tokenize( selector ); + } + i = match.length; + while ( i-- ) { + cached = matcherFromTokens( match[ i ] ); + if ( cached[ expando ] ) { + setMatchers.push( cached ); + } else { + elementMatchers.push( cached ); + } + } + + // Cache the compiled function + cached = compilerCache( + selector, + matcherFromGroupMatchers( elementMatchers, setMatchers ) + ); + + // Save selector and tokenization + cached.selector = selector; + } + return cached; +}; + +/** + * A low-level selection function that works with Sizzle's compiled + * selector functions + * @param {String|Function} selector A selector or a pre-compiled + * selector function built with Sizzle.compile + * @param {Element} context + * @param {Array} [results] + * @param {Array} [seed] A set of elements to match against + */ +select = Sizzle.select = function( selector, context, results, seed ) { + var i, tokens, token, type, find, + compiled = typeof selector === "function" && selector, + match = !seed && tokenize( ( selector = compiled.selector || selector ) ); + + results = results || []; + + // Try to minimize operations if there is only one selector in the list and no seed + // (the latter of which guarantees us context) + if ( match.length === 1 ) { + + // Reduce context if the leading compound selector is an ID + tokens = match[ 0 ] = match[ 0 ].slice( 0 ); + if ( tokens.length > 2 && ( token = tokens[ 0 ] ).type === "ID" && + context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[ 1 ].type ] ) { + + context = ( Expr.find[ "ID" ]( token.matches[ 0 ] + .replace( runescape, funescape ), context ) || [] )[ 0 ]; + if ( !context ) { + return results; + + // Precompiled matchers will still verify ancestry, so step up a level + } else if ( compiled ) { + context = context.parentNode; + } + + selector = selector.slice( tokens.shift().value.length ); + } + + // Fetch a seed set for right-to-left matching + i = matchExpr[ "needsContext" ].test( selector ) ? 
0 : tokens.length;
+ while ( i-- ) {
+ token = tokens[ i ];
+
+ // Abort if we hit a combinator
+ if ( Expr.relative[ ( type = token.type ) ] ) {
+ break;
+ }
+ if ( ( find = Expr.find[ type ] ) ) {
+
+ // Search, expanding context for leading sibling combinators
+ if ( ( seed = find(
+ token.matches[ 0 ].replace( runescape, funescape ),
+ rsibling.test( tokens[ 0 ].type ) && testContext( context.parentNode ) ||
+ context
+ ) ) ) {
+
+ // If seed is empty or no tokens remain, we can return early
+ tokens.splice( i, 1 );
+ selector = seed.length && toSelector( tokens );
+ if ( !selector ) {
+ push.apply( results, seed );
+ return results;
+ }
+
+ break;
+ }
+ }
+ }
+ }
+
+ // Compile and execute a filtering function if one is not provided
+ // Provide `match` to avoid retokenization if we modified the selector above
+ ( compiled || compile( selector, match ) )(
+ seed,
+ context,
+ !documentIsHTML,
+ results,
+ !context || rsibling.test( selector ) && testContext( context.parentNode ) || context
+ );
+ return results;
+};
+
+// One-time assignments
+
+// Sort stability
+support.sortStable = expando.split( "" ).sort( sortOrder ).join( "" ) === expando;
+
+// Support: Chrome 14-35+
+// Always assume duplicates if they aren't passed to the comparison function
+support.detectDuplicates = !!hasDuplicate;
+
+// Initialize against the default document
+setDocument();
+
+// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27)
+// Detached nodes confoundingly follow *each other*
+support.sortDetached = assert( function( el ) {
+
+ // Should return 1, but returns 4 (following)
+ return el.compareDocumentPosition( document.createElement( "fieldset" ) ) & 1;
+} );
+
+// Support: IE<8
+// Prevent attribute/property "interpolation"
+// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx
+if ( !assert( function( el ) {
+ el.innerHTML = "<a href='#'></a>";
+ return el.firstChild.getAttribute( "href" ) === "#";
+} ) ) {
+ addHandle( "type|href|height|width", function( elem, name, isXML ) {
+ if ( !isXML ) {
+ return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 );
+ }
+ } );
+}
+
+// Support: IE<9
+// Use defaultValue in place of getAttribute("value")
+if ( !support.attributes || !assert( function( el ) {
+ el.innerHTML = "<input/>";
+ el.firstChild.setAttribute( "value", "" );
+ return el.firstChild.getAttribute( "value" ) === "";
+} ) ) {
+ addHandle( "value", function( elem, _name, isXML ) {
+ if ( !isXML && elem.nodeName.toLowerCase() === "input" ) {
+ return elem.defaultValue;
+ }
+ } );
+}
+
+// Support: IE<9
+// Use getAttributeNode to fetch booleans when getAttribute lies
+if ( !assert( function( el ) {
+ return el.getAttribute( "disabled" ) == null;
+} ) ) {
+ addHandle( booleans, function( elem, name, isXML ) {
+ var val;
+ if ( !isXML ) {
+ return elem[ name ] === true ? name.toLowerCase() :
+ ( val = elem.getAttributeNode( name ) ) && val.specified ?
+ val.value : + null; + } + } ); +} + +return Sizzle; + +} )( window ); + + + +jQuery.find = Sizzle; +jQuery.expr = Sizzle.selectors; + +// Deprecated +jQuery.expr[ ":" ] = jQuery.expr.pseudos; +jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort; +jQuery.text = Sizzle.getText; +jQuery.isXMLDoc = Sizzle.isXML; +jQuery.contains = Sizzle.contains; +jQuery.escapeSelector = Sizzle.escape; + + + + +var dir = function( elem, dir, until ) { + var matched = [], + truncate = until !== undefined; + + while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) { + if ( elem.nodeType === 1 ) { + if ( truncate && jQuery( elem ).is( until ) ) { + break; + } + matched.push( elem ); + } + } + return matched; +}; + + +var siblings = function( n, elem ) { + var matched = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + matched.push( n ); + } + } + + return matched; +}; + + +var rneedsContext = jQuery.expr.match.needsContext; + + + +function nodeName( elem, name ) { + + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + +} +var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i ); + + + +// Implement the identical functionality for filter and not +function winnow( elements, qualifier, not ) { + if ( isFunction( qualifier ) ) { + return jQuery.grep( elements, function( elem, i ) { + return !!qualifier.call( elem, i, elem ) !== not; + } ); + } + + // Single element + if ( qualifier.nodeType ) { + return jQuery.grep( elements, function( elem ) { + return ( elem === qualifier ) !== not; + } ); + } + + // Arraylike of elements (jQuery, arguments, Array) + if ( typeof qualifier !== "string" ) { + return jQuery.grep( elements, function( elem ) { + return ( indexOf.call( qualifier, elem ) > -1 ) !== not; + } ); + } + + // Filtered directly for both simple and complex selectors + return jQuery.filter( qualifier, elements, not ); +} + +jQuery.filter = function( expr, elems, not ) { + var elem = elems[ 0 ]; + + if ( not ) { + expr = ":not(" + expr + ")"; + } + + if ( elems.length === 1 && elem.nodeType === 1 ) { + return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : []; + } + + return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { + return elem.nodeType === 1; + } ) ); +}; + +jQuery.fn.extend( { + find: function( selector ) { + var i, ret, + len = this.length, + self = this; + + if ( typeof selector !== "string" ) { + return this.pushStack( jQuery( selector ).filter( function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( self[ i ], this ) ) { + return true; + } + } + } ) ); + } + + ret = this.pushStack( [] ); + + for ( i = 0; i < len; i++ ) { + jQuery.find( selector, self[ i ], ret ); + } + + return len > 1 ? jQuery.uniqueSort( ret ) : ret; + }, + filter: function( selector ) { + return this.pushStack( winnow( this, selector || [], false ) ); + }, + not: function( selector ) { + return this.pushStack( winnow( this, selector || [], true ) ); + }, + is: function( selector ) { + return !!winnow( + this, + + // If this is a positional/relative selector, check membership in the returned set + // so $("p:first").is("p:last") won't return true for a doc with two "p". + typeof selector === "string" && rneedsContext.test( selector ) ? 
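+
+// Usage sketch for the winnow()-backed filters above (illustrative, not from
+// the upstream source; the selectors are assumed markup):
+//
+//     var $items = jQuery( "li" );
+//     $items.filter( ".active" );                        // selector qualifier
+//     $items.filter( function( i ) { return i % 2 === 0; } ); // predicate form
+//     $items.not( document.getElementById( "skip" ) );   // single-element qualifier
+//     $items.is( ":last-child" );                        // boolean membership test
+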
+				jQuery( selector ) :
+				selector || [],
+			false
+		).length;
+	}
+} );
+
+
+// Initialize a jQuery object
+
+
+// A central reference to the root jQuery(document)
+var rootjQuery,
+
+	// A simple way to check for HTML strings
+	// Prioritize #id over <tag> to avoid XSS via location.hash (#9521)
+	// Strict HTML recognition (#11290: must start with <)
+	// Shortcut simple #id case for speed
+	rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/,
+
+	init = jQuery.fn.init = function( selector, context, root ) {
+		var match, elem;
+
+		// HANDLE: $(""), $(null), $(undefined), $(false)
+		if ( !selector ) {
+			return this;
+		}
+
+		// Method init() accepts an alternate rootjQuery
+		// so migrate can support jQuery.sub (gh-2101)
+		root = root || rootjQuery;
+
+		// Handle HTML strings
+		if ( typeof selector === "string" ) {
+			if ( selector[ 0 ] === "<" &&
+				selector[ selector.length - 1 ] === ">" &&
+				selector.length >= 3 ) {
+
+				// Assume that strings that start and end with <> are HTML and skip the regex check
+				match = [ null, selector, null ];
+
+			} else {
+				match = rquickExpr.exec( selector );
+			}
+
+			// Match html or make sure no context is specified for #id
+			if ( match && ( match[ 1 ] || !context ) ) {
+
+				// HANDLE: $(html) -> $(array)
+				if ( match[ 1 ] ) {
+					context = context instanceof jQuery ? context[ 0 ] : context;
+
+					// Option to run scripts is true for back-compat
+					// Intentionally let the error be thrown if parseHTML is not present
+					jQuery.merge( this, jQuery.parseHTML(
+						match[ 1 ],
+						context && context.nodeType ? context.ownerDocument || context : document,
+						true
+					) );
+
+					// HANDLE: $(html, props)
+					if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) {
+						for ( match in context ) {
+
+							// Properties of context are called as methods if possible
+							if ( isFunction( this[ match ] ) ) {
+								this[ match ]( context[ match ] );
+
+							// ...and otherwise set as attributes
+							} else {
+								this.attr( match, context[ match ] );
+							}
+						}
+					}
+
+					return this;
+
+				// HANDLE: $(#id)
+				} else {
+					elem = document.getElementById( match[ 2 ] );
+
+					if ( elem ) {
+
+						// Inject the element directly into the jQuery object
+						this[ 0 ] = elem;
+						this.length = 1;
+					}
+					return this;
+				}
+
+			// HANDLE: $(expr, $(...))
+			} else if ( !context || context.jquery ) {
+				return ( context || root ).find( selector );
+
+			// HANDLE: $(expr, context)
+			// (which is just equivalent to: $(context).find(expr)
+			} else {
+				return this.constructor( context ).find( selector );
+			}
+
+		// HANDLE: $(DOMElement)
+		} else if ( selector.nodeType ) {
+			this[ 0 ] = selector;
+			this.length = 1;
+			return this;
+
+		// HANDLE: $(function)
+		// Shortcut for document ready
+		} else if ( isFunction( selector ) ) {
+			return root.ready !== undefined ?
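+
+			// The branches above give jQuery() its overloaded constructor forms;
+			// an illustrative sampler (sketch; the markup and ids are assumed):
+			//
+			//     jQuery( "<p>hi</p>" );                     // $(html) via parseHTML
+			//     jQuery( "<p/>", { id: "x", text: "hi" } ); // $(html, props)
+			//     jQuery( "#main" );                         // fast getElementById path
+			//     jQuery( ".row", ctx );                     // same as $(ctx).find(".row")
+			//     jQuery( document.body );                   // $(DOMElement)
+			//     jQuery( function() {} );                   // document-ready shortcut
+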
+ root.ready( selector ) : + + // Execute immediately if ready is not present + selector( jQuery ); + } + + return jQuery.makeArray( selector, this ); + }; + +// Give the init function the jQuery prototype for later instantiation +init.prototype = jQuery.fn; + +// Initialize central reference +rootjQuery = jQuery( document ); + + +var rparentsprev = /^(?:parents|prev(?:Until|All))/, + + // Methods guaranteed to produce a unique set when starting from a unique set + guaranteedUnique = { + children: true, + contents: true, + next: true, + prev: true + }; + +jQuery.fn.extend( { + has: function( target ) { + var targets = jQuery( target, this ), + l = targets.length; + + return this.filter( function() { + var i = 0; + for ( ; i < l; i++ ) { + if ( jQuery.contains( this, targets[ i ] ) ) { + return true; + } + } + } ); + }, + + closest: function( selectors, context ) { + var cur, + i = 0, + l = this.length, + matched = [], + targets = typeof selectors !== "string" && jQuery( selectors ); + + // Positional selectors never match, since there's no _selection_ context + if ( !rneedsContext.test( selectors ) ) { + for ( ; i < l; i++ ) { + for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) { + + // Always skip document fragments + if ( cur.nodeType < 11 && ( targets ? + targets.index( cur ) > -1 : + + // Don't pass non-elements to Sizzle + cur.nodeType === 1 && + jQuery.find.matchesSelector( cur, selectors ) ) ) { + + matched.push( cur ); + break; + } + } + } + } + + return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched ); + }, + + // Determine the position of an element within the set + index: function( elem ) { + + // No argument, return index in parent + if ( !elem ) { + return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1; + } + + // Index in selector + if ( typeof elem === "string" ) { + return indexOf.call( jQuery( elem ), this[ 0 ] ); + } + + // Locate the position of the desired element + return indexOf.call( this, + + // If it receives a jQuery object, the first element is used + elem.jquery ? elem[ 0 ] : elem + ); + }, + + add: function( selector, context ) { + return this.pushStack( + jQuery.uniqueSort( + jQuery.merge( this.get(), jQuery( selector, context ) ) + ) + ); + }, + + addBack: function( selector ) { + return this.add( selector == null ? + this.prevObject : this.prevObject.filter( selector ) + ); + } +} ); + +function sibling( cur, dir ) { + while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {} + return cur; +} + +jQuery.each( { + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? 
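+
+// Usage sketch for the set-building helpers above (illustrative, not from the
+// upstream source; the selectors are assumed markup):
+//
+//     jQuery( "li" ).has( "a" );            // keep items containing a link
+//     jQuery( "span" ).closest( "ul" );     // nearest matching ancestor per item
+//     jQuery( "li.active" ).index();        // position among its siblings
+//     jQuery( "li.active" ).index( "li" );  // position within jQuery( "li" )
+//     jQuery( "p" ).find( "a" ).addBack();  // links plus the original paragraphs
+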
parent : null;
+	},
+	parents: function( elem ) {
+		return dir( elem, "parentNode" );
+	},
+	parentsUntil: function( elem, _i, until ) {
+		return dir( elem, "parentNode", until );
+	},
+	next: function( elem ) {
+		return sibling( elem, "nextSibling" );
+	},
+	prev: function( elem ) {
+		return sibling( elem, "previousSibling" );
+	},
+	nextAll: function( elem ) {
+		return dir( elem, "nextSibling" );
+	},
+	prevAll: function( elem ) {
+		return dir( elem, "previousSibling" );
+	},
+	nextUntil: function( elem, _i, until ) {
+		return dir( elem, "nextSibling", until );
+	},
+	prevUntil: function( elem, _i, until ) {
+		return dir( elem, "previousSibling", until );
+	},
+	siblings: function( elem ) {
+		return siblings( ( elem.parentNode || {} ).firstChild, elem );
+	},
+	children: function( elem ) {
+		return siblings( elem.firstChild );
+	},
+	contents: function( elem ) {
+		if ( elem.contentDocument != null &&
+
+			// Support: IE 11+
+			// <object> elements with no `data` attribute has an object
+			// `contentDocument` with a `null` prototype.
+			getProto( elem.contentDocument ) ) {
+
+			return elem.contentDocument;
+		}
+
+		// Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only
+		// Treat the template element as a regular one in browsers that
+		// don't support it.
+		if ( nodeName( elem, "template" ) ) {
+			elem = elem.content || elem;
+		}
+
+		return jQuery.merge( [], elem.childNodes );
+	}
+}, function( name, fn ) {
+	jQuery.fn[ name ] = function( until, selector ) {
+		var matched = jQuery.map( this, fn, until );
+
+		if ( name.slice( -5 ) !== "Until" ) {
+			selector = until;
+		}
+
+		if ( selector && typeof selector === "string" ) {
+			matched = jQuery.filter( selector, matched );
+		}
+
+		if ( this.length > 1 ) {
+
+			// Remove duplicates
+			if ( !guaranteedUnique[ name ] ) {
+				jQuery.uniqueSort( matched );
+			}
+
+			// Reverse order for parents* and prev-derivatives
+			if ( rparentsprev.test( name ) ) {
+				matched.reverse();
+			}
+		}
+
+		return this.pushStack( matched );
+	};
+} );
+var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g );
+
+
+
+// Convert String-formatted options into Object-formatted ones
+function createOptions( options ) {
+	var object = {};
+	jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) {
+		object[ flag ] = true;
+	} );
+	return object;
+}
+
+/*
+ * Create a callback list using the following parameters:
+ *
+ *	options: an optional list of space-separated options that will change how
+ *			the callback list behaves or a more traditional option object
+ *
+ * By default a callback list will act like an event callback list and can be
+ * "fired" multiple times.
+ *
+ * Possible options:
+ *
+ *	once:			will ensure the callback list can only be fired once (like a Deferred)
+ *
+ *	memory:			will keep track of previous values and will call any callback added
+ *					after the list has been fired right away with the latest "memorized"
+ *					values (like a Deferred)
+ *
+ *	unique:			will ensure a callback can only be added once (no duplicate in the list)
+ *
+ *	stopOnFalse:	interrupt callings when a callback returns false
+ *
+ */
+jQuery.Callbacks = function( options ) {
+
+	// Convert options from String-formatted to Object-formatted if needed
+	// (we check in cache first)
+	options = typeof options === "string" ?
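+
+	// A minimal sketch of the flag combinations documented above (illustrative,
+	// not from the upstream source):
+	//
+	//     var list = jQuery.Callbacks( "once memory" );
+	//     list.add( function( v ) { console.log( "first:", v ); } );
+	//     list.fire( "ready" );            // fires, then locks ("once")
+	//     list.add( function( v ) {        // still invoked with "ready" ("memory")
+	//         console.log( "late:", v );
+	//     } );
+	//     list.fire( "again" );            // no-op: the list already fired
+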
+ createOptions( options ) : + jQuery.extend( {}, options ); + + var // Flag to know if list is currently firing + firing, + + // Last fire value for non-forgettable lists + memory, + + // Flag to know if list was already fired + fired, + + // Flag to prevent firing + locked, + + // Actual callback list + list = [], + + // Queue of execution data for repeatable lists + queue = [], + + // Index of currently firing callback (modified by add/remove as needed) + firingIndex = -1, + + // Fire callbacks + fire = function() { + + // Enforce single-firing + locked = locked || options.once; + + // Execute callbacks for all pending executions, + // respecting firingIndex overrides and runtime changes + fired = firing = true; + for ( ; queue.length; firingIndex = -1 ) { + memory = queue.shift(); + while ( ++firingIndex < list.length ) { + + // Run callback and check for early termination + if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false && + options.stopOnFalse ) { + + // Jump to end and forget the data so .add doesn't re-fire + firingIndex = list.length; + memory = false; + } + } + } + + // Forget the data if we're done with it + if ( !options.memory ) { + memory = false; + } + + firing = false; + + // Clean up if we're done firing for good + if ( locked ) { + + // Keep an empty list if we have data for future add calls + if ( memory ) { + list = []; + + // Otherwise, this object is spent + } else { + list = ""; + } + } + }, + + // Actual Callbacks object + self = { + + // Add a callback or a collection of callbacks to the list + add: function() { + if ( list ) { + + // If we have memory from a past run, we should fire after adding + if ( memory && !firing ) { + firingIndex = list.length - 1; + queue.push( memory ); + } + + ( function add( args ) { + jQuery.each( args, function( _, arg ) { + if ( isFunction( arg ) ) { + if ( !options.unique || !self.has( arg ) ) { + list.push( arg ); + } + } else if ( arg && arg.length && toType( arg ) !== "string" ) { + + // Inspect recursively + add( arg ); + } + } ); + } )( arguments ); + + if ( memory && !firing ) { + fire(); + } + } + return this; + }, + + // Remove a callback from the list + remove: function() { + jQuery.each( arguments, function( _, arg ) { + var index; + while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { + list.splice( index, 1 ); + + // Handle firing indexes + if ( index <= firingIndex ) { + firingIndex--; + } + } + } ); + return this; + }, + + // Check if a given callback is in the list. + // If no argument is given, return whether or not list has callbacks attached. + has: function( fn ) { + return fn ? + jQuery.inArray( fn, list ) > -1 : + list.length > 0; + }, + + // Remove all callbacks from the list + empty: function() { + if ( list ) { + list = []; + } + return this; + }, + + // Disable .fire and .add + // Abort any current/pending executions + // Clear all callbacks and values + disable: function() { + locked = queue = []; + list = memory = ""; + return this; + }, + disabled: function() { + return !list; + }, + + // Disable .fire + // Also disable .add unless we have memory (since it would have no effect) + // Abort any pending executions + lock: function() { + locked = queue = []; + if ( !memory && !firing ) { + list = memory = ""; + } + return this; + }, + locked: function() { + return !!locked; + }, + + // Call all callbacks with the given context and arguments + fireWith: function( context, args ) { + if ( !locked ) { + args = args || []; + args = [ context, args.slice ? 
args.slice() : args ]; + queue.push( args ); + if ( !firing ) { + fire(); + } + } + return this; + }, + + // Call all the callbacks with the given arguments + fire: function() { + self.fireWith( this, arguments ); + return this; + }, + + // To know if the callbacks have already been called at least once + fired: function() { + return !!fired; + } + }; + + return self; +}; + + +function Identity( v ) { + return v; +} +function Thrower( ex ) { + throw ex; +} + +function adoptValue( value, resolve, reject, noValue ) { + var method; + + try { + + // Check for promise aspect first to privilege synchronous behavior + if ( value && isFunction( ( method = value.promise ) ) ) { + method.call( value ).done( resolve ).fail( reject ); + + // Other thenables + } else if ( value && isFunction( ( method = value.then ) ) ) { + method.call( value, resolve, reject ); + + // Other non-thenables + } else { + + // Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer: + // * false: [ value ].slice( 0 ) => resolve( value ) + // * true: [ value ].slice( 1 ) => resolve() + resolve.apply( undefined, [ value ].slice( noValue ) ); + } + + // For Promises/A+, convert exceptions into rejections + // Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in + // Deferred#then to conditionally suppress rejection. + } catch ( value ) { + + // Support: Android 4.0 only + // Strict mode functions invoked without .call/.apply get global-object context + reject.apply( undefined, [ value ] ); + } +} + +jQuery.extend( { + + Deferred: function( func ) { + var tuples = [ + + // action, add listener, callbacks, + // ... .then handlers, argument index, [final state] + [ "notify", "progress", jQuery.Callbacks( "memory" ), + jQuery.Callbacks( "memory" ), 2 ], + [ "resolve", "done", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 0, "resolved" ], + [ "reject", "fail", jQuery.Callbacks( "once memory" ), + jQuery.Callbacks( "once memory" ), 1, "rejected" ] + ], + state = "pending", + promise = { + state: function() { + return state; + }, + always: function() { + deferred.done( arguments ).fail( arguments ); + return this; + }, + "catch": function( fn ) { + return promise.then( null, fn ); + }, + + // Keep pipe for back-compat + pipe: function( /* fnDone, fnFail, fnProgress */ ) { + var fns = arguments; + + return jQuery.Deferred( function( newDefer ) { + jQuery.each( tuples, function( _i, tuple ) { + + // Map tuples (progress, done, fail) to arguments (done, fail, progress) + var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ]; + + // deferred.progress(function() { bind to newDefer or newDefer.notify }) + // deferred.done(function() { bind to newDefer or newDefer.resolve }) + // deferred.fail(function() { bind to newDefer or newDefer.reject }) + deferred[ tuple[ 1 ] ]( function() { + var returned = fn && fn.apply( this, arguments ); + if ( returned && isFunction( returned.promise ) ) { + returned.promise() + .progress( newDefer.notify ) + .done( newDefer.resolve ) + .fail( newDefer.reject ); + } else { + newDefer[ tuple[ 0 ] + "With" ]( + this, + fn ? 
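+
+					// Basic flow through the tuple wiring above (illustrative
+					// sketch, not from the upstream source):
+					//
+					//     var dfd = jQuery.Deferred();
+					//     dfd.state();                    // "pending"
+					//     dfd.done( function( v ) { console.log( v ); } )
+					//         .fail( function( e ) { console.log( e ); } )
+					//         .always( function() { console.log( "settled" ); } );
+					//     dfd.resolve( 42 );              // state() -> "resolved"
+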
[ returned ] : arguments + ); + } + } ); + } ); + fns = null; + } ).promise(); + }, + then: function( onFulfilled, onRejected, onProgress ) { + var maxDepth = 0; + function resolve( depth, deferred, handler, special ) { + return function() { + var that = this, + args = arguments, + mightThrow = function() { + var returned, then; + + // Support: Promises/A+ section 2.3.3.3.3 + // https://promisesaplus.com/#point-59 + // Ignore double-resolution attempts + if ( depth < maxDepth ) { + return; + } + + returned = handler.apply( that, args ); + + // Support: Promises/A+ section 2.3.1 + // https://promisesaplus.com/#point-48 + if ( returned === deferred.promise() ) { + throw new TypeError( "Thenable self-resolution" ); + } + + // Support: Promises/A+ sections 2.3.3.1, 3.5 + // https://promisesaplus.com/#point-54 + // https://promisesaplus.com/#point-75 + // Retrieve `then` only once + then = returned && + + // Support: Promises/A+ section 2.3.4 + // https://promisesaplus.com/#point-64 + // Only check objects and functions for thenability + ( typeof returned === "object" || + typeof returned === "function" ) && + returned.then; + + // Handle a returned thenable + if ( isFunction( then ) ) { + + // Special processors (notify) just wait for resolution + if ( special ) { + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ) + ); + + // Normal processors (resolve) also hook into progress + } else { + + // ...and disregard older resolution values + maxDepth++; + + then.call( + returned, + resolve( maxDepth, deferred, Identity, special ), + resolve( maxDepth, deferred, Thrower, special ), + resolve( maxDepth, deferred, Identity, + deferred.notifyWith ) + ); + } + + // Handle all other returned values + } else { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Identity ) { + that = undefined; + args = [ returned ]; + } + + // Process the value(s) + // Default process is resolve + ( special || deferred.resolveWith )( that, args ); + } + }, + + // Only normal processors (resolve) catch and reject exceptions + process = special ? + mightThrow : + function() { + try { + mightThrow(); + } catch ( e ) { + + if ( jQuery.Deferred.exceptionHook ) { + jQuery.Deferred.exceptionHook( e, + process.stackTrace ); + } + + // Support: Promises/A+ section 2.3.3.3.4.1 + // https://promisesaplus.com/#point-61 + // Ignore post-resolution exceptions + if ( depth + 1 >= maxDepth ) { + + // Only substitute handlers pass on context + // and multiple values (non-spec behavior) + if ( handler !== Thrower ) { + that = undefined; + args = [ e ]; + } + + deferred.rejectWith( that, args ); + } + } + }; + + // Support: Promises/A+ section 2.3.3.3.1 + // https://promisesaplus.com/#point-57 + // Re-resolve promises immediately to dodge false rejection from + // subsequent errors + if ( depth ) { + process(); + } else { + + // Call an optional hook to record the stack, in case of exception + // since it's otherwise lost when execution goes async + if ( jQuery.Deferred.getStackHook ) { + process.stackTrace = jQuery.Deferred.getStackHook(); + } + window.setTimeout( process ); + } + }; + } + + return jQuery.Deferred( function( newDefer ) { + + // progress_handlers.add( ... ) + tuples[ 0 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onProgress ) ? + onProgress : + Identity, + newDefer.notifyWith + ) + ); + + // fulfilled_handlers.add( ... 
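+
+			// The resolve()/mightThrow machinery above is what unwraps thenables
+			// returned from .then handlers, per Promises/A+ (illustrative sketch,
+			// not from the upstream source):
+			//
+			//     jQuery.Deferred().resolve( 1 ).then( function( n ) {
+			//         return jQuery.Deferred().resolve( n + 1 );  // a thenable
+			//     } ).then( function( n ) {
+			//         console.log( n );     // 2, not the intermediate deferred
+			//     } );
+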
) + tuples[ 1 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onFulfilled ) ? + onFulfilled : + Identity + ) + ); + + // rejected_handlers.add( ... ) + tuples[ 2 ][ 3 ].add( + resolve( + 0, + newDefer, + isFunction( onRejected ) ? + onRejected : + Thrower + ) + ); + } ).promise(); + }, + + // Get a promise for this deferred + // If obj is provided, the promise aspect is added to the object + promise: function( obj ) { + return obj != null ? jQuery.extend( obj, promise ) : promise; + } + }, + deferred = {}; + + // Add list-specific methods + jQuery.each( tuples, function( i, tuple ) { + var list = tuple[ 2 ], + stateString = tuple[ 5 ]; + + // promise.progress = list.add + // promise.done = list.add + // promise.fail = list.add + promise[ tuple[ 1 ] ] = list.add; + + // Handle state + if ( stateString ) { + list.add( + function() { + + // state = "resolved" (i.e., fulfilled) + // state = "rejected" + state = stateString; + }, + + // rejected_callbacks.disable + // fulfilled_callbacks.disable + tuples[ 3 - i ][ 2 ].disable, + + // rejected_handlers.disable + // fulfilled_handlers.disable + tuples[ 3 - i ][ 3 ].disable, + + // progress_callbacks.lock + tuples[ 0 ][ 2 ].lock, + + // progress_handlers.lock + tuples[ 0 ][ 3 ].lock + ); + } + + // progress_handlers.fire + // fulfilled_handlers.fire + // rejected_handlers.fire + list.add( tuple[ 3 ].fire ); + + // deferred.notify = function() { deferred.notifyWith(...) } + // deferred.resolve = function() { deferred.resolveWith(...) } + // deferred.reject = function() { deferred.rejectWith(...) } + deferred[ tuple[ 0 ] ] = function() { + deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments ); + return this; + }; + + // deferred.notifyWith = list.fireWith + // deferred.resolveWith = list.fireWith + // deferred.rejectWith = list.fireWith + deferred[ tuple[ 0 ] + "With" ] = list.fireWith; + } ); + + // Make the deferred a promise + promise.promise( deferred ); + + // Call given func if any + if ( func ) { + func.call( deferred, deferred ); + } + + // All done! + return deferred; + }, + + // Deferred helper + when: function( singleValue ) { + var + + // count of uncompleted subordinates + remaining = arguments.length, + + // count of unprocessed arguments + i = remaining, + + // subordinate fulfillment data + resolveContexts = Array( i ), + resolveValues = slice.call( arguments ), + + // the primary Deferred + primary = jQuery.Deferred(), + + // subordinate callback factory + updateFunc = function( i ) { + return function( value ) { + resolveContexts[ i ] = this; + resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value; + if ( !( --remaining ) ) { + primary.resolveWith( resolveContexts, resolveValues ); + } + }; + }; + + // Single- and empty arguments are adopted like Promise.resolve + if ( remaining <= 1 ) { + adoptValue( singleValue, primary.done( updateFunc( i ) ).resolve, primary.reject, + !remaining ); + + // Use .then() to unwrap secondary thenables (cf. gh-3000) + if ( primary.state() === "pending" || + isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) { + + return primary.then(); + } + } + + // Multiple arguments are aggregated like Promise.all array elements + while ( i-- ) { + adoptValue( resolveValues[ i ], updateFunc( i ), primary.reject ); + } + + return primary.promise(); + } +} ); + + +// These usually indicate a programmer mistake during development, +// warn about them ASAP rather than swallowing them by default. 
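+
+// Aggregation behavior of jQuery.when above (illustrative sketch, not from the
+// upstream source):
+//
+//     var a = jQuery.Deferred(), b = jQuery.Deferred();
+//     jQuery.when( a, b ).done( function( x, y ) {
+//         console.log( x, y );            // one argument slot per input
+//     } );
+//     a.resolve( "A" );
+//     b.resolve( "B" );                   // last subordinate settles the primary
+//     jQuery.when( 7 );                   // non-thenable adopted like Promise.resolve
+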
+var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/; + +jQuery.Deferred.exceptionHook = function( error, stack ) { + + // Support: IE 8 - 9 only + // Console exists when dev tools are open, which can happen at any time + if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) { + window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack ); + } +}; + + + + +jQuery.readyException = function( error ) { + window.setTimeout( function() { + throw error; + } ); +}; + + + + +// The deferred used on DOM ready +var readyList = jQuery.Deferred(); + +jQuery.fn.ready = function( fn ) { + + readyList + .then( fn ) + + // Wrap jQuery.readyException in a function so that the lookup + // happens at the time of error handling instead of callback + // registration. + .catch( function( error ) { + jQuery.readyException( error ); + } ); + + return this; +}; + +jQuery.extend( { + + // Is the DOM ready to be used? Set to true once it occurs. + isReady: false, + + // A counter to track how many items to wait for before + // the ready event fires. See #6781 + readyWait: 1, + + // Handle when the DOM is ready + ready: function( wait ) { + + // Abort if there are pending holds or we're already ready + if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { + return; + } + + // Remember that the DOM is ready + jQuery.isReady = true; + + // If a normal DOM Ready event fired, decrement, and wait if need be + if ( wait !== true && --jQuery.readyWait > 0 ) { + return; + } + + // If there are functions bound, to execute + readyList.resolveWith( document, [ jQuery ] ); + } +} ); + +jQuery.ready.then = readyList.then; + +// The ready event handler and self cleanup method +function completed() { + document.removeEventListener( "DOMContentLoaded", completed ); + window.removeEventListener( "load", completed ); + jQuery.ready(); +} + +// Catch cases where $(document).ready() is called +// after the browser event has already occurred. +// Support: IE <=9 - 10 only +// Older IE sometimes signals "interactive" too soon +if ( document.readyState === "complete" || + ( document.readyState !== "loading" && !document.documentElement.doScroll ) ) { + + // Handle it asynchronously to allow scripts the opportunity to delay ready + window.setTimeout( jQuery.ready ); + +} else { + + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", completed ); + + // A fallback to window.onload, that will always work + window.addEventListener( "load", completed ); +} + + + + +// Multifunctional method to get and set values of a collection +// The value/s can optionally be executed if it's a function +var access = function( elems, fn, key, value, chainable, emptyGet, raw ) { + var i = 0, + len = elems.length, + bulk = key == null; + + // Sets many values + if ( toType( key ) === "object" ) { + chainable = true; + for ( i in key ) { + access( elems, fn, i, key[ i ], true, emptyGet, raw ); + } + + // Sets one value + } else if ( value !== undefined ) { + chainable = true; + + if ( !isFunction( value ) ) { + raw = true; + } + + if ( bulk ) { + + // Bulk operations run against the entire set + if ( raw ) { + fn.call( elems, value ); + fn = null; + + // ...except when executing function values + } else { + bulk = fn; + fn = function( elem, _key, value ) { + return bulk.call( jQuery( elem ), value ); + }; + } + } + + if ( fn ) { + for ( ; i < len; i++ ) { + fn( + elems[ i ], key, raw ? 
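+
+			// How jQuery.fn methods ride the access() multiplexer above
+			// (illustrative sketch; `$el` is an assumed collection):
+			//
+			//     $el.css( "width" );                   // get: read path, no value
+			//     $el.css( "width", "10px" );           // set a single value
+			//     $el.css( { width: 10, height: 20 } ); // set many: object recursion
+			//     $el.css( "width", function( i, old ) { // function values receive
+			//         return parseFloat( old ) * 2;      // the computed old value
+			//     } );
+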
+ value : + value.call( elems[ i ], i, fn( elems[ i ], key ) ) + ); + } + } + } + + if ( chainable ) { + return elems; + } + + // Gets + if ( bulk ) { + return fn.call( elems ); + } + + return len ? fn( elems[ 0 ], key ) : emptyGet; +}; + + +// Matches dashed string for camelizing +var rmsPrefix = /^-ms-/, + rdashAlpha = /-([a-z])/g; + +// Used by camelCase as callback to replace() +function fcamelCase( _all, letter ) { + return letter.toUpperCase(); +} + +// Convert dashed to camelCase; used by the css and data modules +// Support: IE <=9 - 11, Edge 12 - 15 +// Microsoft forgot to hump their vendor prefix (#9572) +function camelCase( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); +} +var acceptData = function( owner ) { + + // Accepts only: + // - Node + // - Node.ELEMENT_NODE + // - Node.DOCUMENT_NODE + // - Object + // - Any + return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType ); +}; + + + + +function Data() { + this.expando = jQuery.expando + Data.uid++; +} + +Data.uid = 1; + +Data.prototype = { + + cache: function( owner ) { + + // Check if the owner object already has a cache + var value = owner[ this.expando ]; + + // If not, create one + if ( !value ) { + value = {}; + + // We can accept data for non-element nodes in modern browsers, + // but we should not, see #8335. + // Always return an empty object. + if ( acceptData( owner ) ) { + + // If it is a node unlikely to be stringify-ed or looped over + // use plain assignment + if ( owner.nodeType ) { + owner[ this.expando ] = value; + + // Otherwise secure it in a non-enumerable property + // configurable must be true to allow the property to be + // deleted when data is removed + } else { + Object.defineProperty( owner, this.expando, { + value: value, + configurable: true + } ); + } + } + } + + return value; + }, + set: function( owner, data, value ) { + var prop, + cache = this.cache( owner ); + + // Handle: [ owner, key, value ] args + // Always use camelCase key (gh-2257) + if ( typeof data === "string" ) { + cache[ camelCase( data ) ] = value; + + // Handle: [ owner, { properties } ] args + } else { + + // Copy the properties one-by-one to the cache object + for ( prop in data ) { + cache[ camelCase( prop ) ] = data[ prop ]; + } + } + return cache; + }, + get: function( owner, key ) { + return key === undefined ? + this.cache( owner ) : + + // Always use camelCase key (gh-2257) + owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ]; + }, + access: function( owner, key, value ) { + + // In cases where either: + // + // 1. No key was specified + // 2. A string key was specified, but no value provided + // + // Take the "read" path and allow the get method to determine + // which value to return, respectively either: + // + // 1. The entire cache object + // 2. The data stored at the key + // + if ( key === undefined || + ( ( key && typeof key === "string" ) && value === undefined ) ) { + + return this.get( owner, key ); + } + + // When the key is not a string, or both a key and value + // are specified, set or extend (existing objects) with either: + // + // 1. An object of properties + // 2. A key and value + // + this.set( owner, key, value ); + + // Since the "set" path can have two possible entry points + // return the expected data based on which path was taken[*] + return value !== undefined ? 
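+
+		// The Data store above keys everything off a per-instance expando
+		// (illustrative sketch, not from the upstream source):
+		//
+		//     var store = new Data();
+		//     var node = document.createElement( "div" );
+		//     store.set( node, "fooBar", 1 );   // keys are camelCased (gh-2257)
+		//     store.get( node, "fooBar" );      // 1
+		//     store.hasData( node );            // true
+		//     store.remove( node, "foo-bar" );  // camelCased before removal
+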
value : key; + }, + remove: function( owner, key ) { + var i, + cache = owner[ this.expando ]; + + if ( cache === undefined ) { + return; + } + + if ( key !== undefined ) { + + // Support array or space separated string of keys + if ( Array.isArray( key ) ) { + + // If key is an array of keys... + // We always set camelCase keys, so remove that. + key = key.map( camelCase ); + } else { + key = camelCase( key ); + + // If a key with the spaces exists, use it. + // Otherwise, create an array by matching non-whitespace + key = key in cache ? + [ key ] : + ( key.match( rnothtmlwhite ) || [] ); + } + + i = key.length; + + while ( i-- ) { + delete cache[ key[ i ] ]; + } + } + + // Remove the expando if there's no more data + if ( key === undefined || jQuery.isEmptyObject( cache ) ) { + + // Support: Chrome <=35 - 45 + // Webkit & Blink performance suffers when deleting properties + // from DOM nodes, so set to undefined instead + // https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted) + if ( owner.nodeType ) { + owner[ this.expando ] = undefined; + } else { + delete owner[ this.expando ]; + } + } + }, + hasData: function( owner ) { + var cache = owner[ this.expando ]; + return cache !== undefined && !jQuery.isEmptyObject( cache ); + } +}; +var dataPriv = new Data(); + +var dataUser = new Data(); + + + +// Implementation Summary +// +// 1. Enforce API surface and semantic compatibility with 1.9.x branch +// 2. Improve the module's maintainability by reducing the storage +// paths to a single mechanism. +// 3. Use the same single mechanism to support "private" and "user" data. +// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData) +// 5. Avoid exposing implementation details on user objects (eg. expando properties) +// 6. Provide a clear path for implementation upgrade to WeakMap in 2014 + +var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, + rmultiDash = /[A-Z]/g; + +function getData( data ) { + if ( data === "true" ) { + return true; + } + + if ( data === "false" ) { + return false; + } + + if ( data === "null" ) { + return null; + } + + // Only convert to a number if it doesn't change the string + if ( data === +data + "" ) { + return +data; + } + + if ( rbrace.test( data ) ) { + return JSON.parse( data ); + } + + return data; +} + +function dataAttr( elem, key, data ) { + var name; + + // If nothing was found internally, try to fetch any + // data from the HTML5 data-* attribute + if ( data === undefined && elem.nodeType === 1 ) { + name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase(); + data = elem.getAttribute( name ); + + if ( typeof data === "string" ) { + try { + data = getData( data ); + } catch ( e ) {} + + // Make sure we set the data so it isn't changed later + dataUser.set( elem, key, data ); + } else { + data = undefined; + } + } + return data; +} + +jQuery.extend( { + hasData: function( elem ) { + return dataUser.hasData( elem ) || dataPriv.hasData( elem ); + }, + + data: function( elem, name, data ) { + return dataUser.access( elem, name, data ); + }, + + removeData: function( elem, name ) { + dataUser.remove( elem, name ); + }, + + // TODO: Now that all calls to _data and _removeData have been replaced + // with direct calls to dataPriv methods, these can be deprecated. 
+ _data: function( elem, name, data ) { + return dataPriv.access( elem, name, data ); + }, + + _removeData: function( elem, name ) { + dataPriv.remove( elem, name ); + } +} ); + +jQuery.fn.extend( { + data: function( key, value ) { + var i, name, data, + elem = this[ 0 ], + attrs = elem && elem.attributes; + + // Gets all values + if ( key === undefined ) { + if ( this.length ) { + data = dataUser.get( elem ); + + if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) { + i = attrs.length; + while ( i-- ) { + + // Support: IE 11 only + // The attrs elements can be null (#14894) + if ( attrs[ i ] ) { + name = attrs[ i ].name; + if ( name.indexOf( "data-" ) === 0 ) { + name = camelCase( name.slice( 5 ) ); + dataAttr( elem, name, data[ name ] ); + } + } + } + dataPriv.set( elem, "hasDataAttrs", true ); + } + } + + return data; + } + + // Sets multiple values + if ( typeof key === "object" ) { + return this.each( function() { + dataUser.set( this, key ); + } ); + } + + return access( this, function( value ) { + var data; + + // The calling jQuery object (element matches) is not empty + // (and therefore has an element appears at this[ 0 ]) and the + // `value` parameter was not undefined. An empty jQuery object + // will result in `undefined` for elem = this[ 0 ] which will + // throw an exception if an attempt to read a data cache is made. + if ( elem && value === undefined ) { + + // Attempt to get data from the cache + // The key will always be camelCased in Data + data = dataUser.get( elem, key ); + if ( data !== undefined ) { + return data; + } + + // Attempt to "discover" the data in + // HTML5 custom data-* attrs + data = dataAttr( elem, key ); + if ( data !== undefined ) { + return data; + } + + // We tried really hard, but the data doesn't exist. + return; + } + + // Set the data... 
+ this.each( function() { + + // We always store the camelCased key + dataUser.set( this, key, value ); + } ); + }, null, value, arguments.length > 1, null, true ); + }, + + removeData: function( key ) { + return this.each( function() { + dataUser.remove( this, key ); + } ); + } +} ); + + +jQuery.extend( { + queue: function( elem, type, data ) { + var queue; + + if ( elem ) { + type = ( type || "fx" ) + "queue"; + queue = dataPriv.get( elem, type ); + + // Speed up dequeue by getting out quickly if this is just a lookup + if ( data ) { + if ( !queue || Array.isArray( data ) ) { + queue = dataPriv.access( elem, type, jQuery.makeArray( data ) ); + } else { + queue.push( data ); + } + } + return queue || []; + } + }, + + dequeue: function( elem, type ) { + type = type || "fx"; + + var queue = jQuery.queue( elem, type ), + startLength = queue.length, + fn = queue.shift(), + hooks = jQuery._queueHooks( elem, type ), + next = function() { + jQuery.dequeue( elem, type ); + }; + + // If the fx queue is dequeued, always remove the progress sentinel + if ( fn === "inprogress" ) { + fn = queue.shift(); + startLength--; + } + + if ( fn ) { + + // Add a progress sentinel to prevent the fx queue from being + // automatically dequeued + if ( type === "fx" ) { + queue.unshift( "inprogress" ); + } + + // Clear up the last queue stop function + delete hooks.stop; + fn.call( elem, next, hooks ); + } + + if ( !startLength && hooks ) { + hooks.empty.fire(); + } + }, + + // Not public - generate a queueHooks object, or return the current one + _queueHooks: function( elem, type ) { + var key = type + "queueHooks"; + return dataPriv.get( elem, key ) || dataPriv.access( elem, key, { + empty: jQuery.Callbacks( "once memory" ).add( function() { + dataPriv.remove( elem, [ type + "queue", key ] ); + } ) + } ); + } +} ); + +jQuery.fn.extend( { + queue: function( type, data ) { + var setter = 2; + + if ( typeof type !== "string" ) { + data = type; + type = "fx"; + setter--; + } + + if ( arguments.length < setter ) { + return jQuery.queue( this[ 0 ], type ); + } + + return data === undefined ? 
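+
+		// How .data() and the data-* attribute coercion above fit together
+		// (illustrative sketch; assumes markup like
+		// <div id="box" data-max-count="5" data-flag="true">):
+		//
+		//     var $box = jQuery( "#box" );
+		//     $box.data( "maxCount" );      // 5, via getData() coercion
+		//     $box.data( "flag" );          // true
+		//     $box.data( "maxCount", 10 );  // stored in dataUser; attribute untouched
+		//     $box.removeData( "flag" );
+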
+ this : + this.each( function() { + var queue = jQuery.queue( this, type, data ); + + // Ensure a hooks for this queue + jQuery._queueHooks( this, type ); + + if ( type === "fx" && queue[ 0 ] !== "inprogress" ) { + jQuery.dequeue( this, type ); + } + } ); + }, + dequeue: function( type ) { + return this.each( function() { + jQuery.dequeue( this, type ); + } ); + }, + clearQueue: function( type ) { + return this.queue( type || "fx", [] ); + }, + + // Get a promise resolved when queues of a certain type + // are emptied (fx is the type by default) + promise: function( type, obj ) { + var tmp, + count = 1, + defer = jQuery.Deferred(), + elements = this, + i = this.length, + resolve = function() { + if ( !( --count ) ) { + defer.resolveWith( elements, [ elements ] ); + } + }; + + if ( typeof type !== "string" ) { + obj = type; + type = undefined; + } + type = type || "fx"; + + while ( i-- ) { + tmp = dataPriv.get( elements[ i ], type + "queueHooks" ); + if ( tmp && tmp.empty ) { + count++; + tmp.empty.add( resolve ); + } + } + resolve(); + return defer.promise( obj ); + } +} ); +var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source; + +var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" ); + + +var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; + +var documentElement = document.documentElement; + + + + var isAttached = function( elem ) { + return jQuery.contains( elem.ownerDocument, elem ); + }, + composed = { composed: true }; + + // Support: IE 9 - 11+, Edge 12 - 18+, iOS 10.0 - 10.2 only + // Check attachment across shadow DOM boundaries when possible (gh-3504) + // Support: iOS 10.0-10.2 only + // Early iOS 10 versions support `attachShadow` but not `getRootNode`, + // leading to errors. We need to check for `getRootNode`. + if ( documentElement.getRootNode ) { + isAttached = function( elem ) { + return jQuery.contains( elem.ownerDocument, elem ) || + elem.getRootNode( composed ) === elem.ownerDocument; + }; + } +var isHiddenWithinTree = function( elem, el ) { + + // isHiddenWithinTree might be called from jQuery#filter function; + // in that case, element will be second argument + elem = el || elem; + + // Inline style trumps all + return elem.style.display === "none" || + elem.style.display === "" && + + // Otherwise, check computed style + // Support: Firefox <=43 - 45 + // Disconnected elements can have computed display: none, so first confirm that elem is + // in the document. + isAttached( elem ) && + + jQuery.css( elem, "display" ) === "none"; + }; + + + +function adjustCSS( elem, prop, valueParts, tween ) { + var adjusted, scale, + maxIterations = 20, + currentValue = tween ? + function() { + return tween.cur(); + } : + function() { + return jQuery.css( elem, prop, "" ); + }, + initial = currentValue(), + unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? 
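+
+	// Queue plumbing from above in action (illustrative sketch; "steps" is an
+	// assumed custom queue name):
+	//
+	//     var $box = jQuery( "#box" );
+	//     $box.queue( "steps", function( next ) { console.log( 1 ); next(); } );
+	//     $box.queue( "steps", function( next ) { console.log( 2 ); next(); } );
+	//     $box.dequeue( "steps" );          // non-"fx" queues start manually
+	//     $box.promise( "steps" ).done( function() { console.log( "drained" ); } );
+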
"" : "px" ), + + // Starting value computation is required for potential unit mismatches + initialInUnit = elem.nodeType && + ( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) && + rcssNum.exec( jQuery.css( elem, prop ) ); + + if ( initialInUnit && initialInUnit[ 3 ] !== unit ) { + + // Support: Firefox <=54 + // Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144) + initial = initial / 2; + + // Trust units reported by jQuery.css + unit = unit || initialInUnit[ 3 ]; + + // Iteratively approximate from a nonzero starting point + initialInUnit = +initial || 1; + + while ( maxIterations-- ) { + + // Evaluate and update our best guess (doubling guesses that zero out). + // Finish if the scale equals or crosses 1 (making the old*new product non-positive). + jQuery.style( elem, prop, initialInUnit + unit ); + if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) { + maxIterations = 0; + } + initialInUnit = initialInUnit / scale; + + } + + initialInUnit = initialInUnit * 2; + jQuery.style( elem, prop, initialInUnit + unit ); + + // Make sure we update the tween properties later on + valueParts = valueParts || []; + } + + if ( valueParts ) { + initialInUnit = +initialInUnit || +initial || 0; + + // Apply relative offset (+=/-=) if specified + adjusted = valueParts[ 1 ] ? + initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] : + +valueParts[ 2 ]; + if ( tween ) { + tween.unit = unit; + tween.start = initialInUnit; + tween.end = adjusted; + } + } + return adjusted; +} + + +var defaultDisplayMap = {}; + +function getDefaultDisplay( elem ) { + var temp, + doc = elem.ownerDocument, + nodeName = elem.nodeName, + display = defaultDisplayMap[ nodeName ]; + + if ( display ) { + return display; + } + + temp = doc.body.appendChild( doc.createElement( nodeName ) ); + display = jQuery.css( temp, "display" ); + + temp.parentNode.removeChild( temp ); + + if ( display === "none" ) { + display = "block"; + } + defaultDisplayMap[ nodeName ] = display; + + return display; +} + +function showHide( elements, show ) { + var display, elem, + values = [], + index = 0, + length = elements.length; + + // Determine new display value for elements that need to change + for ( ; index < length; index++ ) { + elem = elements[ index ]; + if ( !elem.style ) { + continue; + } + + display = elem.style.display; + if ( show ) { + + // Since we force visibility upon cascade-hidden elements, an immediate (and slow) + // check is required in this first loop unless we have a nonempty display value (either + // inline or about-to-be-restored) + if ( display === "none" ) { + values[ index ] = dataPriv.get( elem, "display" ) || null; + if ( !values[ index ] ) { + elem.style.display = ""; + } + } + if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) { + values[ index ] = getDefaultDisplay( elem ); + } + } else { + if ( display !== "none" ) { + values[ index ] = "none"; + + // Remember what we're overwriting + dataPriv.set( elem, "display", display ); + } + } + } + + // Set the display of the elements in a second loop to avoid constant reflow + for ( index = 0; index < length; index++ ) { + if ( values[ index ] != null ) { + elements[ index ].style.display = values[ index ]; + } + } + + return elements; +} + +jQuery.fn.extend( { + show: function() { + return showHide( this, true ); + }, + hide: function() { + return showHide( this ); + }, + toggle: function( state ) { + if ( typeof state === "boolean" ) { + return state ? 
this.show() : this.hide();
+		}
+
+		return this.each( function() {
+			if ( isHiddenWithinTree( this ) ) {
+				jQuery( this ).show();
+			} else {
+				jQuery( this ).hide();
+			}
+		} );
+	}
+} );
+var rcheckableType = ( /^(?:checkbox|radio)$/i );
+
+var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]*)/i );
+
+var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i );
+
+
+
+( function() {
+	var fragment = document.createDocumentFragment(),
+		div = fragment.appendChild( document.createElement( "div" ) ),
+		input = document.createElement( "input" );
+
+	// Support: Android 4.0 - 4.3 only
+	// Check state lost if the name is set (#11217)
+	// Support: Windows Web Apps (WWA)
+	// `name` and `type` must use .setAttribute for WWA (#14901)
+	input.setAttribute( "type", "radio" );
+	input.setAttribute( "checked", "checked" );
+	input.setAttribute( "name", "t" );
+
+	div.appendChild( input );
+
+	// Support: Android <=4.1 only
+	// Older WebKit doesn't clone checked state correctly in fragments
+	support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked;
+
+	// Support: IE <=11 only
+	// Make sure textarea (and checkbox) defaultValue is properly cloned
+	div.innerHTML = "<textarea>x</textarea>";
+	support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue;
+
+	// Support: IE <=9 only
+	// IE <=9 replaces <option> tags with their contents when inserted outside of
+	// the select element.
+	div.innerHTML = "<option></option>";
+	support.option = !!div.lastChild;
+} )();
+
+
+// We have to close these tags to support XHTML (#13200)
+var wrapMap = {
+
+	// XHTML parsers do not magically insert elements in the
+	// same way that tag soup parsers do. So we cannot shorten
+	// this by omitting <tbody> or other required elements.
+	thead: [ 1, "<table>", "</table>" ],
+	col: [ 2, "<table><colgroup>", "</colgroup></table>" ],
+	tr: [ 2, "<table><tbody>", "</tbody></table>" ],
+	td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ],
+
+	_default: [ 0, "", "" ]
+};
+
+wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead;
+wrapMap.th = wrapMap.td;
+
+// Support: IE <=9 only
+if ( !support.option ) {
+	wrapMap.optgroup = wrapMap.option = [ 1, "<select multiple='multiple'>", "</select>" ];
+}
+
+
+function getAll( context, tag ) {
+
+	// Support: IE <=9 - 11 only
+	// Use typeof to avoid zero-argument method invocation on host objects (#15151)
+	var ret;
+
+	if ( typeof context.getElementsByTagName !== "undefined" ) {
+		ret = context.getElementsByTagName( tag || "*" );
+
+	} else if ( typeof context.querySelectorAll !== "undefined" ) {
+		ret = context.querySelectorAll( tag || "*" );
+
+	} else {
+		ret = [];
+	}
+
+	if ( tag === undefined || tag && nodeName( context, tag ) ) {
+		return jQuery.merge( [ context ], ret );
+	}
+
+	return ret;
+}
+
+
+// Mark scripts as having already been evaluated
+function setGlobalEval( elems, refElements ) {
+	var i = 0,
+		l = elems.length;
+
+	for ( ; i < l; i++ ) {
+		dataPriv.set(
+			elems[ i ],
+			"globalEval",
+			!refElements || dataPriv.get( refElements[ i ], "globalEval" )
+		);
+	}
+}
+
+
+var rhtml = /<|&#?\w+;/;
+
+function buildFragment( elems, context, scripts, selection, ignored ) {
+	var elem, tmp, tag, wrap, attached, j,
+		fragment = context.createDocumentFragment(),
+		nodes = [],
+		i = 0,
+		l = elems.length;
+
+	for ( ; i < l; i++ ) {
+		elem = elems[ i ];
+
+		if ( elem || elem === 0 ) {
+
+			// Add nodes directly
+			if ( toType( elem ) === "object" ) {
+
+				// Support: Android <=4.0 only, PhantomJS 1 only
+				// push.apply(_, arraylike) throws on ancient WebKit
+				jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem );
+
+			// Convert non-html into a text node
+			} else if ( !rhtml.test( elem ) ) {
+				nodes.push( context.createTextNode( elem ) );
+
+			// Convert html into DOM nodes
+			} else {
+				tmp = tmp || fragment.appendChild( context.createElement( "div" ) );
+
+				// Deserialize a standard representation
+				tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase();
+				wrap = wrapMap[ tag ] || wrapMap._default;
+				tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ];
+
+				// Descend through wrappers to the right content
+				j = wrap[ 0 ];
+				while ( j-- ) {
+					tmp = tmp.lastChild;
+				}
+
+				// Support: Android <=4.0 only, PhantomJS 1 only
+				// push.apply(_, arraylike) throws on ancient WebKit
+				jQuery.merge( nodes, tmp.childNodes );
+
+				// Remember the top-level container
+				tmp = fragment.firstChild;
+
+				// Ensure the created nodes are orphaned (#12392)
+				tmp.textContent = "";
+			}
+		}
+	}
+
+	// Remove wrapper from fragment
+	fragment.textContent = "";
+
+	i = 0;
+	while ( ( elem = nodes[ i++ ] ) ) {
+
+		// Skip elements already in the context collection (trac-4087)
+		if ( selection && jQuery.inArray( elem, selection ) > -1 ) {
+			if ( ignored ) {
+				ignored.push( elem );
+			}
+			continue;
+		}
+
+		attached = isAttached( elem );
+
+		// Append to fragment
+		tmp = getAll( fragment.appendChild( elem ), "script" );
+
+		// Preserve script evaluation history
+		if ( attached ) {
+			setGlobalEval( tmp );
+		}
+
+		// Capture executables
+		if ( scripts ) {
+			j = 0;
+			while ( ( elem = tmp[ j++ ] ) ) {
+				if ( rscriptType.test( elem.type || "" ) ) {
+					scripts.push( elem );
+				}
+			}
+		}
+	}
+
+	return fragment;
+}
+
+
+var rtypenamespace = /^([^.]*)(?:\.(.+)|)/;
+
+function returnTrue() {
+	return true;
+}
+
+function returnFalse() {
+	return false;
+}
+
+// So expect focus to be synchronous when the element is already active, +// and blur to be synchronous when the element is not already active. +// (focus and blur are always synchronous in other supported browsers, +// this just defines when we can count on it). +function expectSync( elem, type ) { + return ( elem === safeActiveElement() ) === ( type === "focus" ); +} + +// Support: IE <=9 only +// Accessing document.activeElement can throw unexpectedly +// https://bugs.jquery.com/ticket/13393 +function safeActiveElement() { + try { + return document.activeElement; + } catch ( err ) { } +} + +function on( elem, types, selector, data, fn, one ) { + var origFn, type; + + // Types can be a map of types/handlers + if ( typeof types === "object" ) { + + // ( types-Object, selector, data ) + if ( typeof selector !== "string" ) { + + // ( types-Object, data ) + data = data || selector; + selector = undefined; + } + for ( type in types ) { + on( elem, type, selector, data, types[ type ], one ); + } + return elem; + } + + if ( data == null && fn == null ) { + + // ( types, fn ) + fn = selector; + data = selector = undefined; + } else if ( fn == null ) { + if ( typeof selector === "string" ) { + + // ( types, selector, fn ) + fn = data; + data = undefined; + } else { + + // ( types, data, fn ) + fn = data; + data = selector; + selector = undefined; + } + } + if ( fn === false ) { + fn = returnFalse; + } else if ( !fn ) { + return elem; + } + + if ( one === 1 ) { + origFn = fn; + fn = function( event ) { + + // Can use an empty set, since event contains the info + jQuery().off( event ); + return origFn.apply( this, arguments ); + }; + + // Use same guid so caller can remove using origFn + fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); + } + return elem.each( function() { + jQuery.event.add( this, types, fn, data, selector ); + } ); +} + +/* + * Helper functions for managing events -- not part of the public interface. + * Props to Dean Edwards' addEvent library for many of the ideas. + */ +jQuery.event = { + + global: {}, + + add: function( elem, types, handler, data, selector ) { + + var handleObjIn, eventHandle, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.get( elem ); + + // Only attach events to objects that accept data + if ( !acceptData( elem ) ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + selector = handleObjIn.selector; + } + + // Ensure that invalid selectors throw exceptions at attach time + // Evaluate against documentElement in case elem is a non-element node (e.g., document) + if ( selector ) { + jQuery.find.matchesSelector( documentElement, selector ); + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + if ( !( events = elemData.events ) ) { + events = elemData.events = Object.create( null ); + } + if ( !( eventHandle = elemData.handle ) ) { + eventHandle = elemData.handle = function( e ) { + + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ? 
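+
+			// Argument shuffling handled by on() above (illustrative sketch;
+			// `$list`, `handler`, `enter`, and `leave` are assumed names):
+			//
+			//     $list.on( "click", handler );                  // direct bind
+			//     $list.on( "click", "li", handler );            // delegated
+			//     $list.on( "click", "li", { id: 7 }, handler ); // with event.data
+			//     $list.on( { mouseenter: enter, mouseleave: leave } ); // types map
+			//     $list.one( "click", handler );  // one === 1: self-removing wrapper
+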
+ jQuery.event.dispatch.apply( elem, arguments ) : undefined; + }; + } + + // Handle multiple events separated by a space + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // There *must* be a type, no attaching namespace-only handlers + if ( !type ) { + continue; + } + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend( { + type: type, + origType: origType, + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + needsContext: selector && jQuery.expr.match.needsContext.test( selector ), + namespace: namespaces.join( "." ) + }, handleObjIn ); + + // Init the event handler queue if we're the first + if ( !( handlers = events[ type ] ) ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener if the special events handler returns false + if ( !special.setup || + special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + }, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + + var j, origCount, tmp, + events, t, handleObj, + special, handlers, type, namespaces, origType, + elemData = dataPriv.hasData( elem ) && dataPriv.get( elem ); + + if ( !elemData || !( events = elemData.events ) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + types = ( types || "" ).match( rnothtmlwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[ t ] ) || []; + type = origType = tmp[ 1 ]; + namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort(); + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector ? 
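+
+		// Namespace-aware removal implemented below (illustrative sketch;
+		// `$btn` and `handler` are assumed names):
+		//
+		//     $btn.on( "click.myPlugin", handler );
+		//     $btn.off( "click.myPlugin" );   // only this namespace's click handlers
+		//     $btn.off( ".myPlugin" );        // every type within the namespace
+		//     $btn.off( "click", "**" );      // delegated click handlers only
+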
special.delegateType : special.bindType ) || type; + handlers = events[ type ] || []; + tmp = tmp[ 2 ] && + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ); + + // Remove matching events + origCount = j = handlers.length; + while ( j-- ) { + handleObj = handlers[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !tmp || tmp.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || + selector === "**" && handleObj.selector ) ) { + handlers.splice( j, 1 ); + + if ( handleObj.selector ) { + handlers.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( origCount && !handlers.length ) { + if ( !special.teardown || + special.teardown.call( elem, namespaces, elemData.handle ) === false ) { + + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove data and the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + dataPriv.remove( elem, "handle events" ); + } + }, + + dispatch: function( nativeEvent ) { + + var i, j, ret, matched, handleObj, handlerQueue, + args = new Array( arguments.length ), + + // Make a writable jQuery.Event from the native event object + event = jQuery.event.fix( nativeEvent ), + + handlers = ( + dataPriv.get( this, "events" ) || Object.create( null ) + )[ event.type ] || [], + special = jQuery.event.special[ event.type ] || {}; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[ 0 ] = event; + + for ( i = 1; i < arguments.length; i++ ) { + args[ i ] = arguments[ i ]; + } + + event.delegateTarget = this; + + // Call the preDispatch hook for the mapped type, and let it bail if desired + if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { + return; + } + + // Determine handlers + handlerQueue = jQuery.event.handlers.call( this, event, handlers ); + + // Run delegates first; they may want to stop propagation beneath us + i = 0; + while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) { + event.currentTarget = matched.elem; + + j = 0; + while ( ( handleObj = matched.handlers[ j++ ] ) && + !event.isImmediatePropagationStopped() ) { + + // If the event is namespaced, then each handler is only invoked if it is + // specially universal or its namespaces are a superset of the event's. 
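+				// For illustration only (editorial sketch, not part of the source):
+				// with a hypothetical "validator" namespace,
+				//
+				//   jQuery( form ).on( "submit.validator", checkFields );
+				//   jQuery( form ).trigger( "submit.validator" ); // checkFields runs
+				//   jQuery( form ).trigger( "submit.other" );     // skipped, namespace mismatch
+				//   jQuery( form ).trigger( "submit" );           // un-namespaced, runs all handlers
+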
+				if ( !event.rnamespace || handleObj.namespace === false ||
+					event.rnamespace.test( handleObj.namespace ) ) {
+
+					event.handleObj = handleObj;
+					event.data = handleObj.data;
+
+					ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle ||
+						handleObj.handler ).apply( matched.elem, args );
+
+					if ( ret !== undefined ) {
+						if ( ( event.result = ret ) === false ) {
+							event.preventDefault();
+							event.stopPropagation();
+						}
+					}
+				}
+			}
+		}
+
+		// Call the postDispatch hook for the mapped type
+		if ( special.postDispatch ) {
+			special.postDispatch.call( this, event );
+		}
+
+		return event.result;
+	},
+
+	handlers: function( event, handlers ) {
+		var i, handleObj, sel, matchedHandlers, matchedSelectors,
+			handlerQueue = [],
+			delegateCount = handlers.delegateCount,
+			cur = event.target;
+
+		// Find delegate handlers
+		if ( delegateCount &&
+
+			// Support: IE <=9
+			// Black-hole SVG <use> instance trees (trac-13180)
+			cur.nodeType &&
+
+			// Support: Firefox <=42
+			// Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861)
+			// https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click
+			// Support: IE 11 only
+			// ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343)
+			!( event.type === "click" && event.button >= 1 ) ) {
+
+			for ( ; cur !== this; cur = cur.parentNode || this ) {
+
+				// Don't check non-elements (#13208)
+				// Don't process clicks on disabled elements (#6911, #8165, #11382, #11764)
+				if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) {
+					matchedHandlers = [];
+					matchedSelectors = {};
+					for ( i = 0; i < delegateCount; i++ ) {
+						handleObj = handlers[ i ];
+
+						// Don't conflict with Object.prototype properties (#13203)
+						sel = handleObj.selector + " ";
+
+						if ( matchedSelectors[ sel ] === undefined ) {
+							matchedSelectors[ sel ] = handleObj.needsContext ?
+								jQuery( sel, this ).index( cur ) > -1 :
+								jQuery.find( sel, this, null, [ cur ] ).length;
+						}
+						if ( matchedSelectors[ sel ] ) {
+							matchedHandlers.push( handleObj );
+						}
+					}
+					if ( matchedHandlers.length ) {
+						handlerQueue.push( { elem: cur, handlers: matchedHandlers } );
+					}
+				}
+			}
+		}
+
+		// Add the remaining (directly-bound) handlers
+		cur = this;
+		if ( delegateCount < handlers.length ) {
+			handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } );
+		}
+
+		return handlerQueue;
+	},
+
+	addProp: function( name, hook ) {
+		Object.defineProperty( jQuery.Event.prototype, name, {
+			enumerable: true,
+			configurable: true,
+
+			get: isFunction( hook ) ?
+				function() {
+					if ( this.originalEvent ) {
+						return hook( this.originalEvent );
+					}
+				} :
+				function() {
+					if ( this.originalEvent ) {
+						return this.originalEvent[ name ];
+					}
+				},
+
+			set: function( value ) {
+				Object.defineProperty( this, name, {
+					enumerable: true,
+					configurable: true,
+					writable: true,
+					value: value
+				} );
+			}
+		} );
+	},
+
+	fix: function( originalEvent ) {
+		return originalEvent[ jQuery.expando ] ?
+			originalEvent :
+			new jQuery.Event( originalEvent );
+	},
+
+	special: {
+		load: {
+
+			// Prevent triggered image.load events from bubbling to window.load
+			noBubble: true
+		},
+		click: {
+
+			// Utilize native event to ensure correct state for checkable inputs
+			setup: function( data ) {
+
+				// For mutual compressibility with _default, replace `this` access with a local var.
+				// `|| data` is dead code meant only to preserve the variable through minification.
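+				// Editorial sketch (not part of the source), assuming a checkbox input:
+				// because the native click is leveraged, handlers observe the
+				// already-toggled state even for synthetic clicks, e.g.
+				//
+				//   jQuery( "input[type=checkbox]" ).on( "click", function() {
+				//       console.log( this.checked ); // post-toggle value
+				//   } ).trigger( "click" );
+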
+ var el = this || data; + + // Claim the first handler + if ( rcheckableType.test( el.type ) && + el.click && nodeName( el, "input" ) ) { + + // dataPriv.set( el, "click", ... ) + leverageNative( el, "click", returnTrue ); + } + + // Return false to allow normal processing in the caller + return false; + }, + trigger: function( data ) { + + // For mutual compressibility with _default, replace `this` access with a local var. + // `|| data` is dead code meant only to preserve the variable through minification. + var el = this || data; + + // Force setup before triggering a click + if ( rcheckableType.test( el.type ) && + el.click && nodeName( el, "input" ) ) { + + leverageNative( el, "click" ); + } + + // Return non-false to allow normal event-path propagation + return true; + }, + + // For cross-browser consistency, suppress native .click() on links + // Also prevent it if we're currently inside a leveraged native-event stack + _default: function( event ) { + var target = event.target; + return rcheckableType.test( target.type ) && + target.click && nodeName( target, "input" ) && + dataPriv.get( target, "click" ) || + nodeName( target, "a" ); + } + }, + + beforeunload: { + postDispatch: function( event ) { + + // Support: Firefox 20+ + // Firefox doesn't alert if the returnValue field is not set. + if ( event.result !== undefined && event.originalEvent ) { + event.originalEvent.returnValue = event.result; + } + } + } + } +}; + +// Ensure the presence of an event listener that handles manually-triggered +// synthetic events by interrupting progress until reinvoked in response to +// *native* events that it fires directly, ensuring that state changes have +// already occurred before other listeners are invoked. +function leverageNative( el, type, expectSync ) { + + // Missing expectSync indicates a trigger call, which must force setup through jQuery.event.add + if ( !expectSync ) { + if ( dataPriv.get( el, type ) === undefined ) { + jQuery.event.add( el, type, returnTrue ); + } + return; + } + + // Register the controller as a special universal handler for all event namespaces + dataPriv.set( el, type, false ); + jQuery.event.add( el, type, { + namespace: false, + handler: function( event ) { + var notAsync, result, + saved = dataPriv.get( this, type ); + + if ( ( event.isTrigger & 1 ) && this[ type ] ) { + + // Interrupt processing of the outer synthetic .trigger()ed event + // Saved data should be false in such cases, but might be a leftover capture object + // from an async native handler (gh-4350) + if ( !saved.length ) { + + // Store arguments for use when handling the inner native event + // There will always be at least one argument (an event object), so this array + // will not be confused with a leftover capture object. + saved = slice.call( arguments ); + dataPriv.set( this, type, saved ); + + // Trigger the native event and capture its result + // Support: IE <=9 - 11+ + // focus() and blur() are asynchronous + notAsync = expectSync( this, type ); + this[ type ](); + result = dataPriv.get( this, type ); + if ( saved !== result || notAsync ) { + dataPriv.set( this, type, false ); + } else { + result = {}; + } + if ( saved !== result ) { + + // Cancel the outer synthetic event + event.stopImmediatePropagation(); + event.preventDefault(); + + // Support: Chrome 86+ + // In Chrome, if an element having a focusout handler is blurred by + // clicking outside of it, it invokes the handler synchronously. 
If + // that handler calls `.remove()` on the element, the data is cleared, + // leaving `result` undefined. We need to guard against this. + return result && result.value; + } + + // If this is an inner synthetic event for an event with a bubbling surrogate + // (focus or blur), assume that the surrogate already propagated from triggering the + // native event and prevent that from happening again here. + // This technically gets the ordering wrong w.r.t. to `.trigger()` (in which the + // bubbling surrogate propagates *after* the non-bubbling base), but that seems + // less bad than duplication. + } else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) { + event.stopPropagation(); + } + + // If this is a native event triggered above, everything is now in order + // Fire an inner synthetic event with the original arguments + } else if ( saved.length ) { + + // ...and capture the result + dataPriv.set( this, type, { + value: jQuery.event.trigger( + + // Support: IE <=9 - 11+ + // Extend with the prototype to reset the above stopImmediatePropagation() + jQuery.extend( saved[ 0 ], jQuery.Event.prototype ), + saved.slice( 1 ), + this + ) + } ); + + // Abort handling of the native event + event.stopImmediatePropagation(); + } + } + } ); +} + +jQuery.removeEvent = function( elem, type, handle ) { + + // This "if" is needed for plain objects + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle ); + } +}; + +jQuery.Event = function( src, props ) { + + // Allow instantiation without the 'new' keyword + if ( !( this instanceof jQuery.Event ) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = src.defaultPrevented || + src.defaultPrevented === undefined && + + // Support: Android <=2.3 only + src.returnValue === false ? + returnTrue : + returnFalse; + + // Create target properties + // Support: Safari <=6 - 7 only + // Target should not be a text node (#504, #13143) + this.target = ( src.target && src.target.nodeType === 3 ) ? 
+ src.target.parentNode : + src.target; + + this.currentTarget = src.currentTarget; + this.relatedTarget = src.relatedTarget; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || Date.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + constructor: jQuery.Event, + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse, + isSimulated: false, + + preventDefault: function() { + var e = this.originalEvent; + + this.isDefaultPrevented = returnTrue; + + if ( e && !this.isSimulated ) { + e.preventDefault(); + } + }, + stopPropagation: function() { + var e = this.originalEvent; + + this.isPropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopPropagation(); + } + }, + stopImmediatePropagation: function() { + var e = this.originalEvent; + + this.isImmediatePropagationStopped = returnTrue; + + if ( e && !this.isSimulated ) { + e.stopImmediatePropagation(); + } + + this.stopPropagation(); + } +}; + +// Includes all common event props including KeyEvent and MouseEvent specific props +jQuery.each( { + altKey: true, + bubbles: true, + cancelable: true, + changedTouches: true, + ctrlKey: true, + detail: true, + eventPhase: true, + metaKey: true, + pageX: true, + pageY: true, + shiftKey: true, + view: true, + "char": true, + code: true, + charCode: true, + key: true, + keyCode: true, + button: true, + buttons: true, + clientX: true, + clientY: true, + offsetX: true, + offsetY: true, + pointerId: true, + pointerType: true, + screenX: true, + screenY: true, + targetTouches: true, + toElement: true, + touches: true, + which: true +}, jQuery.event.addProp ); + +jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) { + jQuery.event.special[ type ] = { + + // Utilize native event if possible so blur/focus sequence is correct + setup: function() { + + // Claim the first handler + // dataPriv.set( this, "focus", ... ) + // dataPriv.set( this, "blur", ... ) + leverageNative( this, type, expectSync ); + + // Return false to allow normal processing in the caller + return false; + }, + trigger: function() { + + // Force setup before trigger + leverageNative( this, type ); + + // Return non-false to allow normal event-path propagation + return true; + }, + + // Suppress native focus or blur as it's already being fired + // in leverageNative. + _default: function() { + return true; + }, + + delegateType: delegateType + }; +} ); + +// Create mouseenter/leave events using mouseover/out and event-time checks +// so that event delegation works in jQuery. +// Do the same for pointerenter/pointerleave and pointerover/pointerout +// +// Support: Safari 7 only +// Safari sends mouseenter too often; see: +// https://bugs.chromium.org/p/chromium/issues/detail?id=470258 +// for the description of the bug (it existed in older Chrome versions as well). 
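+// Editorial sketch (not part of the source): the over/out mapping below is what
+// makes delegated enter/leave possible, since mouseenter/mouseleave don't bubble:
+//
+//   jQuery( "#list" ).on( "mouseenter", "li", function() {
+//       jQuery( this ).addClass( "hover" ); // once per li entered, not per descendant
+//   } );
+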
+jQuery.each( {
+	mouseenter: "mouseover",
+	mouseleave: "mouseout",
+	pointerenter: "pointerover",
+	pointerleave: "pointerout"
+}, function( orig, fix ) {
+	jQuery.event.special[ orig ] = {
+		delegateType: fix,
+		bindType: fix,
+
+		handle: function( event ) {
+			var ret,
+				target = this,
+				related = event.relatedTarget,
+				handleObj = event.handleObj;
+
+			// For mouseenter/leave call the handler if related is outside the target.
+			// NB: No relatedTarget if the mouse left/entered the browser window
+			if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) {
+				event.type = handleObj.origType;
+				ret = handleObj.handler.apply( this, arguments );
+				event.type = fix;
+			}
+			return ret;
+		}
+	};
+} );
+
+jQuery.fn.extend( {
+
+	on: function( types, selector, data, fn ) {
+		return on( this, types, selector, data, fn );
+	},
+	one: function( types, selector, data, fn ) {
+		return on( this, types, selector, data, fn, 1 );
+	},
+	off: function( types, selector, fn ) {
+		var handleObj, type;
+		if ( types && types.preventDefault && types.handleObj ) {
+
+			// ( event )  dispatched jQuery.Event
+			handleObj = types.handleObj;
+			jQuery( types.delegateTarget ).off(
+				handleObj.namespace ?
+					handleObj.origType + "." + handleObj.namespace :
+					handleObj.origType,
+				handleObj.selector,
+				handleObj.handler
+			);
+			return this;
+		}
+		if ( typeof types === "object" ) {
+
+			// ( types-object [, selector] )
+			for ( type in types ) {
+				this.off( type, selector, types[ type ] );
+			}
+			return this;
+		}
+		if ( selector === false || typeof selector === "function" ) {
+
+			// ( types [, fn] )
+			fn = selector;
+			selector = undefined;
+		}
+		if ( fn === false ) {
+			fn = returnFalse;
+		}
+		return this.each( function() {
+			jQuery.event.remove( this, types, fn, selector );
+		} );
+	}
+} );
+
+
+var
+
+	// Support: IE <=10 - 11, Edge 12 - 13 only
+	// In IE/Edge using regex groups here causes severe slowdowns.
+	// See https://connect.microsoft.com/IE/feedback/details/1736512/
+	rnoInnerhtml = /<script|<style|<link/i,
+
+	// checked="checked" or checked
+	rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i,
+
+	rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;
+
+// Prefer a tbody over its parent table for containing new rows
+function manipulationTarget( elem, content ) {
+	if ( nodeName( elem, "table" ) &&
+		nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) {
+
+		return jQuery( elem ).children( "tbody" )[ 0 ] || elem;
+	}
+
+	return elem;
+}
+
+// Replace/restore the type attribute of script elements for safe DOM manipulation
+function disableScript( elem ) {
+	elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type;
+	return elem;
+}
+function restoreScript( elem ) {
+	if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) {
+		elem.type = elem.type.slice( 5 );
+	} else {
+		elem.removeAttribute( "type" );
+	}
+
+	return elem;
+}
+
+function cloneCopyEvent( src, dest ) {
+	var i, l, type, pdataOld, udataOld, udataCur, events;
+
+	if ( dest.nodeType !== 1 ) {
+		return;
+	}
+
+	// 1. Copy private data: events, handlers, etc.
+	if ( dataPriv.hasData( src ) ) {
+		pdataOld = dataPriv.get( src );
+		events = pdataOld.events;
+
+		if ( events ) {
+			dataPriv.remove( dest, "handle events" );
+
+			for ( type in events ) {
+				for ( i = 0, l = events[ type ].length; i < l; i++ ) {
+					jQuery.event.add( dest, type, events[ type ][ i ] );
+				}
+			}
+		}
+	}
+
+	// 2. 
Copy user data + if ( dataUser.hasData( src ) ) { + udataOld = dataUser.access( src ); + udataCur = jQuery.extend( {}, udataOld ); + + dataUser.set( dest, udataCur ); + } +} + +// Fix IE bugs, see support tests +function fixInput( src, dest ) { + var nodeName = dest.nodeName.toLowerCase(); + + // Fails to persist the checked state of a cloned checkbox or radio button. + if ( nodeName === "input" && rcheckableType.test( src.type ) ) { + dest.checked = src.checked; + + // Fails to return the selected option to the default selected state when cloning options + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + } +} + +function domManip( collection, args, callback, ignored ) { + + // Flatten any nested arrays + args = flat( args ); + + var fragment, first, scripts, hasScripts, node, doc, + i = 0, + l = collection.length, + iNoClone = l - 1, + value = args[ 0 ], + valueIsFunction = isFunction( value ); + + // We can't cloneNode fragments that contain checked, in WebKit + if ( valueIsFunction || + ( l > 1 && typeof value === "string" && + !support.checkClone && rchecked.test( value ) ) ) { + return collection.each( function( index ) { + var self = collection.eq( index ); + if ( valueIsFunction ) { + args[ 0 ] = value.call( this, index, self.html() ); + } + domManip( self, args, callback, ignored ); + } ); + } + + if ( l ) { + fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored ); + first = fragment.firstChild; + + if ( fragment.childNodes.length === 1 ) { + fragment = first; + } + + // Require either new content or an interest in ignored elements to invoke the callback + if ( first || ignored ) { + scripts = jQuery.map( getAll( fragment, "script" ), disableScript ); + hasScripts = scripts.length; + + // Use the original fragment for the last item + // instead of the first because it can end up + // being emptied incorrectly in certain situations (#8070). + for ( ; i < l; i++ ) { + node = fragment; + + if ( i !== iNoClone ) { + node = jQuery.clone( node, true, true ); + + // Keep references to cloned scripts for later restoration + if ( hasScripts ) { + + // Support: Android <=4.0 only, PhantomJS 1 only + // push.apply(_, arraylike) throws on ancient WebKit + jQuery.merge( scripts, getAll( node, "script" ) ); + } + } + + callback.call( collection[ i ], node, i ); + } + + if ( hasScripts ) { + doc = scripts[ scripts.length - 1 ].ownerDocument; + + // Reenable scripts + jQuery.map( scripts, restoreScript ); + + // Evaluate executable scripts on first document insertion + for ( i = 0; i < hasScripts; i++ ) { + node = scripts[ i ]; + if ( rscriptType.test( node.type || "" ) && + !dataPriv.access( node, "globalEval" ) && + jQuery.contains( doc, node ) ) { + + if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) { + + // Optional AJAX dependency, but won't run scripts if not present + if ( jQuery._evalUrl && !node.noModule ) { + jQuery._evalUrl( node.src, { + nonce: node.nonce || node.getAttribute( "nonce" ) + }, doc ); + } + } else { + DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc ); + } + } + } + } + } + } + + return collection; +} + +function remove( elem, selector, keepData ) { + var node, + nodes = selector ? 
jQuery.filter( selector, elem ) : elem, + i = 0; + + for ( ; ( node = nodes[ i ] ) != null; i++ ) { + if ( !keepData && node.nodeType === 1 ) { + jQuery.cleanData( getAll( node ) ); + } + + if ( node.parentNode ) { + if ( keepData && isAttached( node ) ) { + setGlobalEval( getAll( node, "script" ) ); + } + node.parentNode.removeChild( node ); + } + } + + return elem; +} + +jQuery.extend( { + htmlPrefilter: function( html ) { + return html; + }, + + clone: function( elem, dataAndEvents, deepDataAndEvents ) { + var i, l, srcElements, destElements, + clone = elem.cloneNode( true ), + inPage = isAttached( elem ); + + // Fix IE cloning issues + if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) && + !jQuery.isXMLDoc( elem ) ) { + + // We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2 + destElements = getAll( clone ); + srcElements = getAll( elem ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + fixInput( srcElements[ i ], destElements[ i ] ); + } + } + + // Copy the events from the original to the clone + if ( dataAndEvents ) { + if ( deepDataAndEvents ) { + srcElements = srcElements || getAll( elem ); + destElements = destElements || getAll( clone ); + + for ( i = 0, l = srcElements.length; i < l; i++ ) { + cloneCopyEvent( srcElements[ i ], destElements[ i ] ); + } + } else { + cloneCopyEvent( elem, clone ); + } + } + + // Preserve script evaluation history + destElements = getAll( clone, "script" ); + if ( destElements.length > 0 ) { + setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); + } + + // Return the cloned set + return clone; + }, + + cleanData: function( elems ) { + var data, elem, type, + special = jQuery.event.special, + i = 0; + + for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) { + if ( acceptData( elem ) ) { + if ( ( data = elem[ dataPriv.expando ] ) ) { + if ( data.events ) { + for ( type in data.events ) { + if ( special[ type ] ) { + jQuery.event.remove( elem, type ); + + // This is a shortcut to avoid jQuery.event.remove's overhead + } else { + jQuery.removeEvent( elem, type, data.handle ); + } + } + } + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataPriv.expando ] = undefined; + } + if ( elem[ dataUser.expando ] ) { + + // Support: Chrome <=35 - 45+ + // Assign undefined instead of using delete, see Data#remove + elem[ dataUser.expando ] = undefined; + } + } + } + } +} ); + +jQuery.fn.extend( { + detach: function( selector ) { + return remove( this, selector, true ); + }, + + remove: function( selector ) { + return remove( this, selector ); + }, + + text: function( value ) { + return access( this, function( value ) { + return value === undefined ? 
+ jQuery.text( this ) : + this.empty().each( function() { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + this.textContent = value; + } + } ); + }, null, value, arguments.length ); + }, + + append: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.appendChild( elem ); + } + } ); + }, + + prepend: function() { + return domManip( this, arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.insertBefore( elem, target.firstChild ); + } + } ); + }, + + before: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this ); + } + } ); + }, + + after: function() { + return domManip( this, arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + } + } ); + }, + + empty: function() { + var elem, + i = 0; + + for ( ; ( elem = this[ i ] ) != null; i++ ) { + if ( elem.nodeType === 1 ) { + + // Prevent memory leaks + jQuery.cleanData( getAll( elem, false ) ); + + // Remove any remaining nodes + elem.textContent = ""; + } + } + + return this; + }, + + clone: function( dataAndEvents, deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; + + return this.map( function() { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + } ); + }, + + html: function( value ) { + return access( this, function( value ) { + var elem = this[ 0 ] || {}, + i = 0, + l = this.length; + + if ( value === undefined && elem.nodeType === 1 ) { + return elem.innerHTML; + } + + // See if we can take a shortcut and just use innerHTML + if ( typeof value === "string" && !rnoInnerhtml.test( value ) && + !wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) { + + value = jQuery.htmlPrefilter( value ); + + try { + for ( ; i < l; i++ ) { + elem = this[ i ] || {}; + + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + elem.innerHTML = value; + } + } + + elem = 0; + + // If using innerHTML throws an exception, use the fallback method + } catch ( e ) {} + } + + if ( elem ) { + this.empty().append( value ); + } + }, null, value, arguments.length ); + }, + + replaceWith: function() { + var ignored = []; + + // Make the changes, replacing each non-ignored context element with the new content + return domManip( this, arguments, function( elem ) { + var parent = this.parentNode; + + if ( jQuery.inArray( this, ignored ) < 0 ) { + jQuery.cleanData( getAll( this ) ); + if ( parent ) { + parent.replaceChild( elem, this ); + } + } + + // Force callback invocation + }, ignored ); + } +} ); + +jQuery.each( { + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = function( selector ) { + var elems, + ret = [], + insert = jQuery( selector ), + last = insert.length - 1, + i = 0; + + for ( ; i <= last; i++ ) { + elems = i === last ? 
this : this.clone( true ); + jQuery( insert[ i ] )[ original ]( elems ); + + // Support: Android <=4.0 only, PhantomJS 1 only + // .get() because push.apply(_, arraylike) throws on ancient WebKit + push.apply( ret, elems.get() ); + } + + return this.pushStack( ret ); + }; +} ); +var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" ); + +var getStyles = function( elem ) { + + // Support: IE <=11 only, Firefox <=30 (#15098, #14150) + // IE throws on elements created in popups + // FF meanwhile throws on frame elements through "defaultView.getComputedStyle" + var view = elem.ownerDocument.defaultView; + + if ( !view || !view.opener ) { + view = window; + } + + return view.getComputedStyle( elem ); + }; + +var swap = function( elem, options, callback ) { + var ret, name, + old = {}; + + // Remember the old values, and insert the new ones + for ( name in options ) { + old[ name ] = elem.style[ name ]; + elem.style[ name ] = options[ name ]; + } + + ret = callback.call( elem ); + + // Revert the old values + for ( name in options ) { + elem.style[ name ] = old[ name ]; + } + + return ret; +}; + + +var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); + + + +( function() { + + // Executing both pixelPosition & boxSizingReliable tests require only one layout + // so they're executed at the same time to save the second computation. + function computeStyleTests() { + + // This is a singleton, we need to execute it only once + if ( !div ) { + return; + } + + container.style.cssText = "position:absolute;left:-11111px;width:60px;" + + "margin-top:1px;padding:0;border:0"; + div.style.cssText = + "position:relative;display:block;box-sizing:border-box;overflow:scroll;" + + "margin:auto;border:1px;padding:1px;" + + "width:60%;top:1%"; + documentElement.appendChild( container ).appendChild( div ); + + var divStyle = window.getComputedStyle( div ); + pixelPositionVal = divStyle.top !== "1%"; + + // Support: Android 4.0 - 4.3 only, Firefox <=3 - 44 + reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12; + + // Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3 + // Some styles come back with percentage values, even though they shouldn't + div.style.right = "60%"; + pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36; + + // Support: IE 9 - 11 only + // Detect misreporting of content dimensions for box-sizing:border-box elements + boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36; + + // Support: IE 9 only + // Detect overflow:scroll screwiness (gh-3699) + // Support: Chrome <=64 + // Don't get tricked when zoom affects offsetWidth (gh-4029) + div.style.position = "absolute"; + scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12; + + documentElement.removeChild( container ); + + // Nullify the div so it wouldn't be stored in the memory and + // it will also be a sign that checks already performed + div = null; + } + + function roundPixelMeasures( measure ) { + return Math.round( parseFloat( measure ) ); + } + + var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal, + reliableTrDimensionsVal, reliableMarginLeftVal, + container = document.createElement( "div" ), + div = document.createElement( "div" ); + + // Finish early in limited (non-browser) environments + if ( !div.style ) { + return; + } + + // Support: IE <=9 - 11 only + // Style of cloned element affects source element cloned (#8908) + div.style.backgroundClip = "content-box"; + div.cloneNode( true ).style.backgroundClip = ""; + 
support.clearCloneStyle = div.style.backgroundClip === "content-box"; + + jQuery.extend( support, { + boxSizingReliable: function() { + computeStyleTests(); + return boxSizingReliableVal; + }, + pixelBoxStyles: function() { + computeStyleTests(); + return pixelBoxStylesVal; + }, + pixelPosition: function() { + computeStyleTests(); + return pixelPositionVal; + }, + reliableMarginLeft: function() { + computeStyleTests(); + return reliableMarginLeftVal; + }, + scrollboxSize: function() { + computeStyleTests(); + return scrollboxSizeVal; + }, + + // Support: IE 9 - 11+, Edge 15 - 18+ + // IE/Edge misreport `getComputedStyle` of table rows with width/height + // set in CSS while `offset*` properties report correct values. + // Behavior in IE 9 is more subtle than in newer versions & it passes + // some versions of this test; make sure not to make it pass there! + // + // Support: Firefox 70+ + // Only Firefox includes border widths + // in computed dimensions. (gh-4529) + reliableTrDimensions: function() { + var table, tr, trChild, trStyle; + if ( reliableTrDimensionsVal == null ) { + table = document.createElement( "table" ); + tr = document.createElement( "tr" ); + trChild = document.createElement( "div" ); + + table.style.cssText = "position:absolute;left:-11111px;border-collapse:separate"; + tr.style.cssText = "border:1px solid"; + + // Support: Chrome 86+ + // Height set through cssText does not get applied. + // Computed height then comes back as 0. + tr.style.height = "1px"; + trChild.style.height = "9px"; + + // Support: Android 8 Chrome 86+ + // In our bodyBackground.html iframe, + // display for all div elements is set to "inline", + // which causes a problem only in Android 8 Chrome 86. + // Ensuring the div is display: block + // gets around this issue. + trChild.style.display = "block"; + + documentElement + .appendChild( table ) + .appendChild( tr ) + .appendChild( trChild ); + + trStyle = window.getComputedStyle( tr ); + reliableTrDimensionsVal = ( parseInt( trStyle.height, 10 ) + + parseInt( trStyle.borderTopWidth, 10 ) + + parseInt( trStyle.borderBottomWidth, 10 ) ) === tr.offsetHeight; + + documentElement.removeChild( table ); + } + return reliableTrDimensionsVal; + } + } ); +} )(); + + +function curCSS( elem, name, computed ) { + var width, minWidth, maxWidth, ret, + + // Support: Firefox 51+ + // Retrieving style before computed somehow + // fixes an issue with getting wrong values + // on detached elements + style = elem.style; + + computed = computed || getStyles( elem ); + + // getPropertyValue is needed for: + // .css('filter') (IE 9 only, #12537) + // .css('--customProperty) (#3144) + if ( computed ) { + ret = computed.getPropertyValue( name ) || computed[ name ]; + + if ( ret === "" && !isAttached( elem ) ) { + ret = jQuery.style( elem, name ); + } + + // A tribute to the "awesome hack by Dean Edwards" + // Android Browser returns percentage for some values, + // but width seems to be reliably pixels. 
+ // This is against the CSSOM draft spec: + // https://drafts.csswg.org/cssom/#resolved-values + if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) { + + // Remember the original values + width = style.width; + minWidth = style.minWidth; + maxWidth = style.maxWidth; + + // Put in the new values to get a computed value out + style.minWidth = style.maxWidth = style.width = ret; + ret = computed.width; + + // Revert the changed values + style.width = width; + style.minWidth = minWidth; + style.maxWidth = maxWidth; + } + } + + return ret !== undefined ? + + // Support: IE <=9 - 11 only + // IE returns zIndex value as an integer. + ret + "" : + ret; +} + + +function addGetHookIf( conditionFn, hookFn ) { + + // Define the hook, we'll check on the first run if it's really needed. + return { + get: function() { + if ( conditionFn() ) { + + // Hook not needed (or it's not possible to use it due + // to missing dependency), remove it. + delete this.get; + return; + } + + // Hook needed; redefine it so that the support test is not executed again. + return ( this.get = hookFn ).apply( this, arguments ); + } + }; +} + + +var cssPrefixes = [ "Webkit", "Moz", "ms" ], + emptyStyle = document.createElement( "div" ).style, + vendorProps = {}; + +// Return a vendor-prefixed property or undefined +function vendorPropName( name ) { + + // Check for vendor prefixed names + var capName = name[ 0 ].toUpperCase() + name.slice( 1 ), + i = cssPrefixes.length; + + while ( i-- ) { + name = cssPrefixes[ i ] + capName; + if ( name in emptyStyle ) { + return name; + } + } +} + +// Return a potentially-mapped jQuery.cssProps or vendor prefixed property +function finalPropName( name ) { + var final = jQuery.cssProps[ name ] || vendorProps[ name ]; + + if ( final ) { + return final; + } + if ( name in emptyStyle ) { + return name; + } + return vendorProps[ name ] = vendorPropName( name ) || name; +} + + +var + + // Swappable if display is none or starts with table + // except "table", "table-cell", or "table-caption" + // See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display + rdisplayswap = /^(none|table(?!-c[ea]).+)/, + rcustomProp = /^--/, + cssShow = { position: "absolute", visibility: "hidden", display: "block" }, + cssNormalTransform = { + letterSpacing: "0", + fontWeight: "400" + }; + +function setPositiveNumber( _elem, value, subtract ) { + + // Any relative (+/-) values have already been + // normalized at this point + var matches = rcssNum.exec( value ); + return matches ? + + // Guard against undefined "subtract", e.g., when used as in cssHooks + Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) : + value; +} + +function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) { + var i = dimension === "width" ? 1 : 0, + extra = 0, + delta = 0; + + // Adjustment may not be necessary + if ( box === ( isBorderBox ? 
"border" : "content" ) ) { + return 0; + } + + for ( ; i < 4; i += 2 ) { + + // Both box models exclude margin + if ( box === "margin" ) { + delta += jQuery.css( elem, box + cssExpand[ i ], true, styles ); + } + + // If we get here with a content-box, we're seeking "padding" or "border" or "margin" + if ( !isBorderBox ) { + + // Add padding + delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + + // For "border" or "margin", add border + if ( box !== "padding" ) { + delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + + // But still keep track of it otherwise + } else { + extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + + // If we get here with a border-box (content + padding + border), we're seeking "content" or + // "padding" or "margin" + } else { + + // For "content", subtract padding + if ( box === "content" ) { + delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles ); + } + + // For "content" or "padding", subtract border + if ( box !== "margin" ) { + delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles ); + } + } + } + + // Account for positive content-box scroll gutter when requested by providing computedVal + if ( !isBorderBox && computedVal >= 0 ) { + + // offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border + // Assuming integer scroll gutter, subtract the rest and round down + delta += Math.max( 0, Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + computedVal - + delta - + extra - + 0.5 + + // If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter + // Use an explicit zero to avoid NaN (gh-3964) + ) ) || 0; + } + + return delta; +} + +function getWidthOrHeight( elem, dimension, extra ) { + + // Start with computed style + var styles = getStyles( elem ), + + // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322). + // Fake content-box until we know it's needed to know the true value. + boxSizingNeeded = !support.boxSizingReliable() || extra, + isBorderBox = boxSizingNeeded && + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + valueIsBorderBox = isBorderBox, + + val = curCSS( elem, dimension, styles ), + offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ); + + // Support: Firefox <=54 + // Return a confounding non-pixel value or feign ignorance, as appropriate. + if ( rnumnonpx.test( val ) ) { + if ( !extra ) { + return val; + } + val = "auto"; + } + + + // Support: IE 9 - 11 only + // Use offsetWidth/offsetHeight for when box sizing is unreliable. + // In those cases, the computed value can be trusted to be border-box. + if ( ( !support.boxSizingReliable() && isBorderBox || + + // Support: IE 10 - 11+, Edge 15 - 18+ + // IE/Edge misreport `getComputedStyle` of table rows with width/height + // set in CSS while `offset*` properties report correct values. + // Interestingly, in some cases IE 9 doesn't suffer from this issue. 
+ !support.reliableTrDimensions() && nodeName( elem, "tr" ) || + + // Fall back to offsetWidth/offsetHeight when value is "auto" + // This happens for inline elements with no explicit setting (gh-3571) + val === "auto" || + + // Support: Android <=4.1 - 4.3 only + // Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602) + !parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) && + + // Make sure the element is visible & connected + elem.getClientRects().length ) { + + isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box"; + + // Where available, offsetWidth/offsetHeight approximate border box dimensions. + // Where not available (e.g., SVG), assume unreliable box-sizing and interpret the + // retrieved value as a content box dimension. + valueIsBorderBox = offsetProp in elem; + if ( valueIsBorderBox ) { + val = elem[ offsetProp ]; + } + } + + // Normalize "" and auto + val = parseFloat( val ) || 0; + + // Adjust for the element's box model + return ( val + + boxModelAdjustment( + elem, + dimension, + extra || ( isBorderBox ? "border" : "content" ), + valueIsBorderBox, + styles, + + // Provide the current computed size to request scroll gutter calculation (gh-3589) + val + ) + ) + "px"; +} + +jQuery.extend( { + + // Add in style property hooks for overriding the default + // behavior of getting and setting a style property + cssHooks: { + opacity: { + get: function( elem, computed ) { + if ( computed ) { + + // We should always get a number back from opacity + var ret = curCSS( elem, "opacity" ); + return ret === "" ? "1" : ret; + } + } + } + }, + + // Don't automatically add "px" to these possibly-unitless properties + cssNumber: { + "animationIterationCount": true, + "columnCount": true, + "fillOpacity": true, + "flexGrow": true, + "flexShrink": true, + "fontWeight": true, + "gridArea": true, + "gridColumn": true, + "gridColumnEnd": true, + "gridColumnStart": true, + "gridRow": true, + "gridRowEnd": true, + "gridRowStart": true, + "lineHeight": true, + "opacity": true, + "order": true, + "orphans": true, + "widows": true, + "zIndex": true, + "zoom": true + }, + + // Add in properties whose names you wish to fix before + // setting or getting the value + cssProps: {}, + + // Get and set the style property on a DOM Node + style: function( elem, name, value, extra ) { + + // Don't set styles on text and comment nodes + if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { + return; + } + + // Make sure that we're working with the right name + var ret, type, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ), + style = elem.style; + + // Make sure that we're working with the right name. We don't + // want to query the value if it is a CSS custom property + // since they are user-defined. 
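+		// Editorial sketch (not part of the source): custom properties bypass
+		// finalPropName() and use setProperty()/getPropertyValue() verbatim, e.g.
+		//
+		//   jQuery( ":root" ).css( "--brand-color", "#0af" ); // style.setProperty path
+		//   jQuery( ":root" ).css( "--brand-color" );         // read back via curCSS
+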
+ if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Gets hook for the prefixed version, then unprefixed version + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // Check if we're setting a value + if ( value !== undefined ) { + type = typeof value; + + // Convert "+=" or "-=" to relative numbers (#7345) + if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) { + value = adjustCSS( elem, name, ret ); + + // Fixes bug #9237 + type = "number"; + } + + // Make sure that null and NaN values aren't set (#7116) + if ( value == null || value !== value ) { + return; + } + + // If a number was passed in, add the unit (except for certain CSS properties) + // The isCustomProp check can be removed in jQuery 4.0 when we only auto-append + // "px" to a few hardcoded values. + if ( type === "number" && !isCustomProp ) { + value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" ); + } + + // background-* props affect original clone's values + if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) { + style[ name ] = "inherit"; + } + + // If a hook was provided, use that value, otherwise just set the specified value + if ( !hooks || !( "set" in hooks ) || + ( value = hooks.set( elem, value, extra ) ) !== undefined ) { + + if ( isCustomProp ) { + style.setProperty( name, value ); + } else { + style[ name ] = value; + } + } + + } else { + + // If a hook was provided get the non-computed value from there + if ( hooks && "get" in hooks && + ( ret = hooks.get( elem, false, extra ) ) !== undefined ) { + + return ret; + } + + // Otherwise just get the value from the style object + return style[ name ]; + } + }, + + css: function( elem, name, extra, styles ) { + var val, num, hooks, + origName = camelCase( name ), + isCustomProp = rcustomProp.test( name ); + + // Make sure that we're working with the right name. We don't + // want to modify the value if it is a CSS custom property + // since they are user-defined. + if ( !isCustomProp ) { + name = finalPropName( origName ); + } + + // Try prefixed name followed by the unprefixed name + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // If a hook was provided get the computed value from there + if ( hooks && "get" in hooks ) { + val = hooks.get( elem, true, extra ); + } + + // Otherwise, if a way to get the computed value exists, use that + if ( val === undefined ) { + val = curCSS( elem, name, styles ); + } + + // Convert "normal" to computed value + if ( val === "normal" && name in cssNormalTransform ) { + val = cssNormalTransform[ name ]; + } + + // Make numeric if forced or a qualifier was provided and val looks numeric + if ( extra === "" || extra ) { + num = parseFloat( val ); + return extra === true || isFinite( num ) ? num || 0 : val; + } + + return val; + } +} ); + +jQuery.each( [ "height", "width" ], function( _i, dimension ) { + jQuery.cssHooks[ dimension ] = { + get: function( elem, computed, extra ) { + if ( computed ) { + + // Certain elements can have dimension info if we invisibly show them + // but it must have a current display style that would benefit + return rdisplayswap.test( jQuery.css( elem, "display" ) ) && + + // Support: Safari 8+ + // Table columns in Safari have non-zero offsetWidth & zero + // getBoundingClientRect().width unless display is changed. + // Support: IE <=11 only + // Running getBoundingClientRect on a disconnected node + // in IE throws an error. 
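+					// (Editorial note, illustrative: .width() on a display:none
+					// element goes through swap() below, briefly applying cssShow
+					// so a real measurement is possible.)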
+ ( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ? + swap( elem, cssShow, function() { + return getWidthOrHeight( elem, dimension, extra ); + } ) : + getWidthOrHeight( elem, dimension, extra ); + } + }, + + set: function( elem, value, extra ) { + var matches, + styles = getStyles( elem ), + + // Only read styles.position if the test has a chance to fail + // to avoid forcing a reflow. + scrollboxSizeBuggy = !support.scrollboxSize() && + styles.position === "absolute", + + // To avoid forcing a reflow, only fetch boxSizing if we need it (gh-3991) + boxSizingNeeded = scrollboxSizeBuggy || extra, + isBorderBox = boxSizingNeeded && + jQuery.css( elem, "boxSizing", false, styles ) === "border-box", + subtract = extra ? + boxModelAdjustment( + elem, + dimension, + extra, + isBorderBox, + styles + ) : + 0; + + // Account for unreliable border-box dimensions by comparing offset* to computed and + // faking a content-box to get border and padding (gh-3699) + if ( isBorderBox && scrollboxSizeBuggy ) { + subtract -= Math.ceil( + elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] - + parseFloat( styles[ dimension ] ) - + boxModelAdjustment( elem, dimension, "border", false, styles ) - + 0.5 + ); + } + + // Convert to pixels if value adjustment is needed + if ( subtract && ( matches = rcssNum.exec( value ) ) && + ( matches[ 3 ] || "px" ) !== "px" ) { + + elem.style[ dimension ] = value; + value = jQuery.css( elem, dimension ); + } + + return setPositiveNumber( elem, value, subtract ); + } + }; +} ); + +jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft, + function( elem, computed ) { + if ( computed ) { + return ( parseFloat( curCSS( elem, "marginLeft" ) ) || + elem.getBoundingClientRect().left - + swap( elem, { marginLeft: 0 }, function() { + return elem.getBoundingClientRect().left; + } ) + ) + "px"; + } + } +); + +// These hooks are used by animate to expand properties +jQuery.each( { + margin: "", + padding: "", + border: "Width" +}, function( prefix, suffix ) { + jQuery.cssHooks[ prefix + suffix ] = { + expand: function( value ) { + var i = 0, + expanded = {}, + + // Assumes a single number if not a string + parts = typeof value === "string" ? value.split( " " ) : [ value ]; + + for ( ; i < 4; i++ ) { + expanded[ prefix + cssExpand[ i ] + suffix ] = + parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; + } + + return expanded; + } + }; + + if ( prefix !== "margin" ) { + jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; + } +} ); + +jQuery.fn.extend( { + css: function( name, value ) { + return access( this, function( elem, name, value ) { + var styles, len, + map = {}, + i = 0; + + if ( Array.isArray( name ) ) { + styles = getStyles( elem ); + len = name.length; + + for ( ; i < len; i++ ) { + map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles ); + } + + return map; + } + + return value !== undefined ? + jQuery.style( elem, name, value ) : + jQuery.css( elem, name ); + }, name, value, arguments.length > 1 ); + } +} ); + + +function Tween( elem, options, prop, end, easing ) { + return new Tween.prototype.init( elem, options, prop, end, easing ); +} +jQuery.Tween = Tween; + +Tween.prototype = { + constructor: Tween, + init: function( elem, options, prop, end, easing, unit ) { + this.elem = elem; + this.prop = prop; + this.easing = easing || jQuery.easing._default; + this.options = options; + this.start = this.now = this.cur(); + this.end = end; + this.unit = unit || ( jQuery.cssNumber[ prop ] ? 
"" : "px" ); + }, + cur: function() { + var hooks = Tween.propHooks[ this.prop ]; + + return hooks && hooks.get ? + hooks.get( this ) : + Tween.propHooks._default.get( this ); + }, + run: function( percent ) { + var eased, + hooks = Tween.propHooks[ this.prop ]; + + if ( this.options.duration ) { + this.pos = eased = jQuery.easing[ this.easing ]( + percent, this.options.duration * percent, 0, 1, this.options.duration + ); + } else { + this.pos = eased = percent; + } + this.now = ( this.end - this.start ) * eased + this.start; + + if ( this.options.step ) { + this.options.step.call( this.elem, this.now, this ); + } + + if ( hooks && hooks.set ) { + hooks.set( this ); + } else { + Tween.propHooks._default.set( this ); + } + return this; + } +}; + +Tween.prototype.init.prototype = Tween.prototype; + +Tween.propHooks = { + _default: { + get: function( tween ) { + var result; + + // Use a property on the element directly when it is not a DOM element, + // or when there is no matching style property that exists. + if ( tween.elem.nodeType !== 1 || + tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) { + return tween.elem[ tween.prop ]; + } + + // Passing an empty string as a 3rd parameter to .css will automatically + // attempt a parseFloat and fallback to a string if the parse fails. + // Simple values such as "10px" are parsed to Float; + // complex values such as "rotate(1rad)" are returned as-is. + result = jQuery.css( tween.elem, tween.prop, "" ); + + // Empty strings, null, undefined and "auto" are converted to 0. + return !result || result === "auto" ? 0 : result; + }, + set: function( tween ) { + + // Use step hook for back compat. + // Use cssHook if its there. + // Use .style if available and use plain properties where available. + if ( jQuery.fx.step[ tween.prop ] ) { + jQuery.fx.step[ tween.prop ]( tween ); + } else if ( tween.elem.nodeType === 1 && ( + jQuery.cssHooks[ tween.prop ] || + tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) { + jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); + } else { + tween.elem[ tween.prop ] = tween.now; + } + } + } +}; + +// Support: IE <=9 only +// Panic based approach to setting things on disconnected nodes +Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { + set: function( tween ) { + if ( tween.elem.nodeType && tween.elem.parentNode ) { + tween.elem[ tween.prop ] = tween.now; + } + } +}; + +jQuery.easing = { + linear: function( p ) { + return p; + }, + swing: function( p ) { + return 0.5 - Math.cos( p * Math.PI ) / 2; + }, + _default: "swing" +}; + +jQuery.fx = Tween.prototype.init; + +// Back compat <1.8 extension point +jQuery.fx.step = {}; + + + + +var + fxNow, inProgress, + rfxtypes = /^(?:toggle|show|hide)$/, + rrun = /queueHooks$/; + +function schedule() { + if ( inProgress ) { + if ( document.hidden === false && window.requestAnimationFrame ) { + window.requestAnimationFrame( schedule ); + } else { + window.setTimeout( schedule, jQuery.fx.interval ); + } + + jQuery.fx.tick(); + } +} + +// Animations created synchronously will run synchronously +function createFxNow() { + window.setTimeout( function() { + fxNow = undefined; + } ); + return ( fxNow = Date.now() ); +} + +// Generate parameters to create a standard animation +function genFx( type, includeWidth ) { + var which, + i = 0, + attrs = { height: type }; + + // If we include width, step value is 1 to do all cssExpand values, + // otherwise step value is 2 to skip over Left and Right + includeWidth = includeWidth ? 
1 : 0; + for ( ; i < 4; i += 2 - includeWidth ) { + which = cssExpand[ i ]; + attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; + } + + if ( includeWidth ) { + attrs.opacity = attrs.width = type; + } + + return attrs; +} + +function createTween( value, prop, animation ) { + var tween, + collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ), + index = 0, + length = collection.length; + for ( ; index < length; index++ ) { + if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) { + + // We're done with this property + return tween; + } + } +} + +function defaultPrefilter( elem, props, opts ) { + var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display, + isBox = "width" in props || "height" in props, + anim = this, + orig = {}, + style = elem.style, + hidden = elem.nodeType && isHiddenWithinTree( elem ), + dataShow = dataPriv.get( elem, "fxshow" ); + + // Queue-skipping animations hijack the fx hooks + if ( !opts.queue ) { + hooks = jQuery._queueHooks( elem, "fx" ); + if ( hooks.unqueued == null ) { + hooks.unqueued = 0; + oldfire = hooks.empty.fire; + hooks.empty.fire = function() { + if ( !hooks.unqueued ) { + oldfire(); + } + }; + } + hooks.unqueued++; + + anim.always( function() { + + // Ensure the complete handler is called before this completes + anim.always( function() { + hooks.unqueued--; + if ( !jQuery.queue( elem, "fx" ).length ) { + hooks.empty.fire(); + } + } ); + } ); + } + + // Detect show/hide animations + for ( prop in props ) { + value = props[ prop ]; + if ( rfxtypes.test( value ) ) { + delete props[ prop ]; + toggle = toggle || value === "toggle"; + if ( value === ( hidden ? "hide" : "show" ) ) { + + // Pretend to be hidden if this is a "show" and + // there is still data from a stopped show/hide + if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) { + hidden = true; + + // Ignore all other no-op show/hide data + } else { + continue; + } + } + orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop ); + } + } + + // Bail out if this is a no-op like .hide().hide() + propTween = !jQuery.isEmptyObject( props ); + if ( !propTween && jQuery.isEmptyObject( orig ) ) { + return; + } + + // Restrict "overflow" and "display" styles during box animations + if ( isBox && elem.nodeType === 1 ) { + + // Support: IE <=9 - 11, Edge 12 - 15 + // Record all 3 overflow attributes because IE does not infer the shorthand + // from identically-valued overflowX and overflowY and Edge just mirrors + // the overflowX value there. 
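+		// Editorial sketch (not part of the source): any width/height animation
+		// hits this path; the values recorded below are restored when it ends, e.g.
+		//
+		//   jQuery( "#panel" ).animate( { height: 0 } ); // overflow:hidden while running
+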
+ opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; + + // Identify a display type, preferring old show/hide data over the CSS cascade + restoreDisplay = dataShow && dataShow.display; + if ( restoreDisplay == null ) { + restoreDisplay = dataPriv.get( elem, "display" ); + } + display = jQuery.css( elem, "display" ); + if ( display === "none" ) { + if ( restoreDisplay ) { + display = restoreDisplay; + } else { + + // Get nonempty value(s) by temporarily forcing visibility + showHide( [ elem ], true ); + restoreDisplay = elem.style.display || restoreDisplay; + display = jQuery.css( elem, "display" ); + showHide( [ elem ] ); + } + } + + // Animate inline elements as inline-block + if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) { + if ( jQuery.css( elem, "float" ) === "none" ) { + + // Restore the original display value at the end of pure show/hide animations + if ( !propTween ) { + anim.done( function() { + style.display = restoreDisplay; + } ); + if ( restoreDisplay == null ) { + display = style.display; + restoreDisplay = display === "none" ? "" : display; + } + } + style.display = "inline-block"; + } + } + } + + if ( opts.overflow ) { + style.overflow = "hidden"; + anim.always( function() { + style.overflow = opts.overflow[ 0 ]; + style.overflowX = opts.overflow[ 1 ]; + style.overflowY = opts.overflow[ 2 ]; + } ); + } + + // Implement show/hide animations + propTween = false; + for ( prop in orig ) { + + // General show/hide setup for this element animation + if ( !propTween ) { + if ( dataShow ) { + if ( "hidden" in dataShow ) { + hidden = dataShow.hidden; + } + } else { + dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } ); + } + + // Store hidden/visible for toggle so `.stop().toggle()` "reverses" + if ( toggle ) { + dataShow.hidden = !hidden; + } + + // Show elements before animating them + if ( hidden ) { + showHide( [ elem ], true ); + } + + /* eslint-disable no-loop-func */ + + anim.done( function() { + + /* eslint-enable no-loop-func */ + + // The final step of a "hide" animation is actually hiding the element + if ( !hidden ) { + showHide( [ elem ] ); + } + dataPriv.remove( elem, "fxshow" ); + for ( prop in orig ) { + jQuery.style( elem, prop, orig[ prop ] ); + } + } ); + } + + // Per-property setup + propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim ); + if ( !( prop in dataShow ) ) { + dataShow[ prop ] = propTween.start; + if ( hidden ) { + propTween.end = propTween.start; + propTween.start = 0; + } + } + } +} + +function propFilter( props, specialEasing ) { + var index, name, easing, value, hooks; + + // camelCase, specialEasing and expand cssHook pass + for ( index in props ) { + name = camelCase( index ); + easing = specialEasing[ name ]; + value = props[ index ]; + if ( Array.isArray( value ) ) { + easing = value[ 1 ]; + value = props[ index ] = value[ 0 ]; + } + + if ( index !== name ) { + props[ name ] = value; + delete props[ index ]; + } + + hooks = jQuery.cssHooks[ name ]; + if ( hooks && "expand" in hooks ) { + value = hooks.expand( value ); + delete props[ name ]; + + // Not quite $.extend, this won't overwrite existing keys. 
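+			// Editorial sketch (not part of the source): e.g. animating a shorthand
+			//
+			//   jQuery( el ).animate( { padding: "10px 20px" } );
+			//
+			// expands here to paddingTop/Right/Bottom/Left tweens, each keeping
+			// the shorthand's easing.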
+ // Reusing 'index' because we have the correct "name" + for ( index in value ) { + if ( !( index in props ) ) { + props[ index ] = value[ index ]; + specialEasing[ index ] = easing; + } + } + } else { + specialEasing[ name ] = easing; + } + } +} + +function Animation( elem, properties, options ) { + var result, + stopped, + index = 0, + length = Animation.prefilters.length, + deferred = jQuery.Deferred().always( function() { + + // Don't match elem in the :animated selector + delete tick.elem; + } ), + tick = function() { + if ( stopped ) { + return false; + } + var currentTime = fxNow || createFxNow(), + remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), + + // Support: Android 2.3 only + // Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497) + temp = remaining / animation.duration || 0, + percent = 1 - temp, + index = 0, + length = animation.tweens.length; + + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( percent ); + } + + deferred.notifyWith( elem, [ animation, percent, remaining ] ); + + // If there's more to do, yield + if ( percent < 1 && length ) { + return remaining; + } + + // If this was an empty animation, synthesize a final progress notification + if ( !length ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + } + + // Resolve the animation and report its conclusion + deferred.resolveWith( elem, [ animation ] ); + return false; + }, + animation = deferred.promise( { + elem: elem, + props: jQuery.extend( {}, properties ), + opts: jQuery.extend( true, { + specialEasing: {}, + easing: jQuery.easing._default + }, options ), + originalProperties: properties, + originalOptions: options, + startTime: fxNow || createFxNow(), + duration: options.duration, + tweens: [], + createTween: function( prop, end ) { + var tween = jQuery.Tween( elem, animation.opts, prop, end, + animation.opts.specialEasing[ prop ] || animation.opts.easing ); + animation.tweens.push( tween ); + return tween; + }, + stop: function( gotoEnd ) { + var index = 0, + + // If we are going to the end, we want to run all the tweens + // otherwise we skip this part + length = gotoEnd ? 
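// propFilter above also unpacks array-valued properties into value + easing,
// which is what makes per-property easing work. A usage sketch (selector and
// values illustrative):
//
//     jQuery( "#panel" ).animate( {
//         width: [ "300px", "linear" ],  // easing for this property only
//         height: "200px"                // falls back to opts.easing
//     }, 400 );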
animation.tweens.length : 0; + if ( stopped ) { + return this; + } + stopped = true; + for ( ; index < length; index++ ) { + animation.tweens[ index ].run( 1 ); + } + + // Resolve when we played the last frame; otherwise, reject + if ( gotoEnd ) { + deferred.notifyWith( elem, [ animation, 1, 0 ] ); + deferred.resolveWith( elem, [ animation, gotoEnd ] ); + } else { + deferred.rejectWith( elem, [ animation, gotoEnd ] ); + } + return this; + } + } ), + props = animation.props; + + propFilter( props, animation.opts.specialEasing ); + + for ( ; index < length; index++ ) { + result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts ); + if ( result ) { + if ( isFunction( result.stop ) ) { + jQuery._queueHooks( animation.elem, animation.opts.queue ).stop = + result.stop.bind( result ); + } + return result; + } + } + + jQuery.map( props, createTween, animation ); + + if ( isFunction( animation.opts.start ) ) { + animation.opts.start.call( elem, animation ); + } + + // Attach callbacks from options + animation + .progress( animation.opts.progress ) + .done( animation.opts.done, animation.opts.complete ) + .fail( animation.opts.fail ) + .always( animation.opts.always ); + + jQuery.fx.timer( + jQuery.extend( tick, { + elem: elem, + anim: animation, + queue: animation.opts.queue + } ) + ); + + return animation; +} + +jQuery.Animation = jQuery.extend( Animation, { + + tweeners: { + "*": [ function( prop, value ) { + var tween = this.createTween( prop, value ); + adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween ); + return tween; + } ] + }, + + tweener: function( props, callback ) { + if ( isFunction( props ) ) { + callback = props; + props = [ "*" ]; + } else { + props = props.match( rnothtmlwhite ); + } + + var prop, + index = 0, + length = props.length; + + for ( ; index < length; index++ ) { + prop = props[ index ]; + Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || []; + Animation.tweeners[ prop ].unshift( callback ); + } + }, + + prefilters: [ defaultPrefilter ], + + prefilter: function( callback, prepend ) { + if ( prepend ) { + Animation.prefilters.unshift( callback ); + } else { + Animation.prefilters.push( callback ); + } + } +} ); + +jQuery.speed = function( speed, easing, fn ) { + var opt = speed && typeof speed === "object" ? 
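// jQuery.Animation above exposes two extension points, tweener and prefilter.
// A minimal registration sketch (the property name and handler bodies are
// hypothetical):
//
//     jQuery.Animation.tweener( "scrollTop", function( prop, value ) {
//         return this.createTween( prop, value );  // default numeric tween
//     } );
//     jQuery.Animation.prefilter( function( elem, props, opts ) {
//         // inspect or rewrite props before tweens are created
//     } );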
jQuery.extend( {}, speed ) : { + complete: fn || !fn && easing || + isFunction( speed ) && speed, + duration: speed, + easing: fn && easing || easing && !isFunction( easing ) && easing + }; + + // Go to the end state if fx are off + if ( jQuery.fx.off ) { + opt.duration = 0; + + } else { + if ( typeof opt.duration !== "number" ) { + if ( opt.duration in jQuery.fx.speeds ) { + opt.duration = jQuery.fx.speeds[ opt.duration ]; + + } else { + opt.duration = jQuery.fx.speeds._default; + } + } + } + + // Normalize opt.queue - true/undefined/null -> "fx" + if ( opt.queue == null || opt.queue === true ) { + opt.queue = "fx"; + } + + // Queueing + opt.old = opt.complete; + + opt.complete = function() { + if ( isFunction( opt.old ) ) { + opt.old.call( this ); + } + + if ( opt.queue ) { + jQuery.dequeue( this, opt.queue ); + } + }; + + return opt; +}; + +jQuery.fn.extend( { + fadeTo: function( speed, to, easing, callback ) { + + // Show any hidden elements after setting opacity to 0 + return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show() + + // Animate to the value specified + .end().animate( { opacity: to }, speed, easing, callback ); + }, + animate: function( prop, speed, easing, callback ) { + var empty = jQuery.isEmptyObject( prop ), + optall = jQuery.speed( speed, easing, callback ), + doAnimation = function() { + + // Operate on a copy of prop so per-property easing won't be lost + var anim = Animation( this, jQuery.extend( {}, prop ), optall ); + + // Empty animations, or finishing resolves immediately + if ( empty || dataPriv.get( this, "finish" ) ) { + anim.stop( true ); + } + }; + + doAnimation.finish = doAnimation; + + return empty || optall.queue === false ? + this.each( doAnimation ) : + this.queue( optall.queue, doAnimation ); + }, + stop: function( type, clearQueue, gotoEnd ) { + var stopQueue = function( hooks ) { + var stop = hooks.stop; + delete hooks.stop; + stop( gotoEnd ); + }; + + if ( typeof type !== "string" ) { + gotoEnd = clearQueue; + clearQueue = type; + type = undefined; + } + if ( clearQueue ) { + this.queue( type || "fx", [] ); + } + + return this.each( function() { + var dequeue = true, + index = type != null && type + "queueHooks", + timers = jQuery.timers, + data = dataPriv.get( this ); + + if ( index ) { + if ( data[ index ] && data[ index ].stop ) { + stopQueue( data[ index ] ); + } + } else { + for ( index in data ) { + if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { + stopQueue( data[ index ] ); + } + } + } + + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && + ( type == null || timers[ index ].queue === type ) ) { + + timers[ index ].anim.stop( gotoEnd ); + dequeue = false; + timers.splice( index, 1 ); + } + } + + // Start the next in the queue if the last step wasn't forced. + // Timers currently will call their complete callbacks, which + // will dequeue but only if they were gotoEnd. + if ( dequeue || !gotoEnd ) { + jQuery.dequeue( this, type ); + } + } ); + }, + finish: function( type ) { + if ( type !== false ) { + type = type || "fx"; + } + return this.each( function() { + var index, + data = dataPriv.get( this ), + queue = data[ type + "queue" ], + hooks = data[ type + "queueHooks" ], + timers = jQuery.timers, + length = queue ? 
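// jQuery.speed (started above) folds the loose (speed, easing, callback)
// calling styles into one options object. With the fx.speeds table defined
// later in this file, e.g.:
//
//     jQuery.speed( "slow", done );
//     // => duration 600, queue "fx", and a complete wrapper that invokes
//     //    done and then dequeues the element's fx queue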
queue.length : 0; + + // Enable finishing flag on private data + data.finish = true; + + // Empty the queue first + jQuery.queue( this, type, [] ); + + if ( hooks && hooks.stop ) { + hooks.stop.call( this, true ); + } + + // Look for any active animations, and finish them + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && timers[ index ].queue === type ) { + timers[ index ].anim.stop( true ); + timers.splice( index, 1 ); + } + } + + // Look for any animations in the old queue and finish them + for ( index = 0; index < length; index++ ) { + if ( queue[ index ] && queue[ index ].finish ) { + queue[ index ].finish.call( this ); + } + } + + // Turn off finishing flag + delete data.finish; + } ); + } +} ); + +jQuery.each( [ "toggle", "show", "hide" ], function( _i, name ) { + var cssFn = jQuery.fn[ name ]; + jQuery.fn[ name ] = function( speed, easing, callback ) { + return speed == null || typeof speed === "boolean" ? + cssFn.apply( this, arguments ) : + this.animate( genFx( name, true ), speed, easing, callback ); + }; +} ); + +// Generate shortcuts for custom animations +jQuery.each( { + slideDown: genFx( "show" ), + slideUp: genFx( "hide" ), + slideToggle: genFx( "toggle" ), + fadeIn: { opacity: "show" }, + fadeOut: { opacity: "hide" }, + fadeToggle: { opacity: "toggle" } +}, function( name, props ) { + jQuery.fn[ name ] = function( speed, easing, callback ) { + return this.animate( props, speed, easing, callback ); + }; +} ); + +jQuery.timers = []; +jQuery.fx.tick = function() { + var timer, + i = 0, + timers = jQuery.timers; + + fxNow = Date.now(); + + for ( ; i < timers.length; i++ ) { + timer = timers[ i ]; + + // Run the timer and safely remove it when done (allowing for external removal) + if ( !timer() && timers[ i ] === timer ) { + timers.splice( i--, 1 ); + } + } + + if ( !timers.length ) { + jQuery.fx.stop(); + } + fxNow = undefined; +}; + +jQuery.fx.timer = function( timer ) { + jQuery.timers.push( timer ); + jQuery.fx.start(); +}; + +jQuery.fx.interval = 13; +jQuery.fx.start = function() { + if ( inProgress ) { + return; + } + + inProgress = true; + schedule(); +}; + +jQuery.fx.stop = function() { + inProgress = null; +}; + +jQuery.fx.speeds = { + slow: 600, + fast: 200, + + // Default speed + _default: 400 +}; + + +// Based off of the plugin by Clint Helfers, with permission. +// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/ +jQuery.fn.delay = function( time, type ) { + time = jQuery.fx ? 
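// The slideDown/slideUp/slideToggle maps above come from genFx(), which
// expands one type over height plus the vertical margins/paddings (and width
// plus opacity when includeWidth is set). Combined with .delay(), which just
// queues a setTimeout (see below), chains like this work (selector
// illustrative):
//
//     jQuery( "#msg" ).slideDown( 200 ).delay( 500 ).fadeOut( "fast" );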
jQuery.fx.speeds[ time ] || time : time; + type = type || "fx"; + + return this.queue( type, function( next, hooks ) { + var timeout = window.setTimeout( next, time ); + hooks.stop = function() { + window.clearTimeout( timeout ); + }; + } ); +}; + + +( function() { + var input = document.createElement( "input" ), + select = document.createElement( "select" ), + opt = select.appendChild( document.createElement( "option" ) ); + + input.type = "checkbox"; + + // Support: Android <=4.3 only + // Default value for a checkbox should be "on" + support.checkOn = input.value !== ""; + + // Support: IE <=11 only + // Must access selectedIndex to make default options select + support.optSelected = opt.selected; + + // Support: IE <=11 only + // An input loses its value after becoming a radio + input = document.createElement( "input" ); + input.value = "t"; + input.type = "radio"; + support.radioValue = input.value === "t"; +} )(); + + +var boolHook, + attrHandle = jQuery.expr.attrHandle; + +jQuery.fn.extend( { + attr: function( name, value ) { + return access( this, jQuery.attr, name, value, arguments.length > 1 ); + }, + + removeAttr: function( name ) { + return this.each( function() { + jQuery.removeAttr( this, name ); + } ); + } +} ); + +jQuery.extend( { + attr: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set attributes on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + // Fallback to prop when attributes are not supported + if ( typeof elem.getAttribute === "undefined" ) { + return jQuery.prop( elem, name, value ); + } + + // Attribute hooks are determined by the lowercase version + // Grab necessary hook if one is defined + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + hooks = jQuery.attrHooks[ name.toLowerCase() ] || + ( jQuery.expr.match.bool.test( name ) ? boolHook : undefined ); + } + + if ( value !== undefined ) { + if ( value === null ) { + jQuery.removeAttr( elem, name ); + return; + } + + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + elem.setAttribute( name, value + "" ); + return value; + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + ret = jQuery.find.attr( elem, name ); + + // Non-existent attributes return null, we normalize to undefined + return ret == null ? 
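// Note the attr/prop split: jQuery.attr (here) works with markup attributes,
// while jQuery.prop (defined further down) works with DOM properties. Boolean
// attributes go through boolHook below, which drops the attribute entirely
// when set to false (markup illustrative):
//
//     jQuery( "#agree" ).attr( "disabled", false );  // removes the attribute
//     jQuery( "#agree" ).prop( "checked" );          // => true / false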
undefined : ret; + }, + + attrHooks: { + type: { + set: function( elem, value ) { + if ( !support.radioValue && value === "radio" && + nodeName( elem, "input" ) ) { + var val = elem.value; + elem.setAttribute( "type", value ); + if ( val ) { + elem.value = val; + } + return value; + } + } + } + }, + + removeAttr: function( elem, value ) { + var name, + i = 0, + + // Attribute names can contain non-HTML whitespace characters + // https://html.spec.whatwg.org/multipage/syntax.html#attributes-2 + attrNames = value && value.match( rnothtmlwhite ); + + if ( attrNames && elem.nodeType === 1 ) { + while ( ( name = attrNames[ i++ ] ) ) { + elem.removeAttribute( name ); + } + } + } +} ); + +// Hooks for boolean attributes +boolHook = { + set: function( elem, value, name ) { + if ( value === false ) { + + // Remove boolean attributes when set to false + jQuery.removeAttr( elem, name ); + } else { + elem.setAttribute( name, name ); + } + return name; + } +}; + +jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( _i, name ) { + var getter = attrHandle[ name ] || jQuery.find.attr; + + attrHandle[ name ] = function( elem, name, isXML ) { + var ret, handle, + lowercaseName = name.toLowerCase(); + + if ( !isXML ) { + + // Avoid an infinite loop by temporarily removing this function from the getter + handle = attrHandle[ lowercaseName ]; + attrHandle[ lowercaseName ] = ret; + ret = getter( elem, name, isXML ) != null ? + lowercaseName : + null; + attrHandle[ lowercaseName ] = handle; + } + return ret; + }; +} ); + + + + +var rfocusable = /^(?:input|select|textarea|button)$/i, + rclickable = /^(?:a|area)$/i; + +jQuery.fn.extend( { + prop: function( name, value ) { + return access( this, jQuery.prop, name, value, arguments.length > 1 ); + }, + + removeProp: function( name ) { + return this.each( function() { + delete this[ jQuery.propFix[ name ] || name ]; + } ); + } +} ); + +jQuery.extend( { + prop: function( elem, name, value ) { + var ret, hooks, + nType = elem.nodeType; + + // Don't get/set properties on text, comment and attribute nodes + if ( nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) { + + // Fix name and attach hooks + name = jQuery.propFix[ name ] || name; + hooks = jQuery.propHooks[ name ]; + } + + if ( value !== undefined ) { + if ( hooks && "set" in hooks && + ( ret = hooks.set( elem, value, name ) ) !== undefined ) { + return ret; + } + + return ( elem[ name ] = value ); + } + + if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) { + return ret; + } + + return elem[ name ]; + }, + + propHooks: { + tabIndex: { + get: function( elem ) { + + // Support: IE <=9 - 11 only + // elem.tabIndex doesn't always return the + // correct value when it hasn't been explicitly set + // https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ + // Use proper attribute retrieval(#12072) + var tabindex = jQuery.find.attr( elem, "tabindex" ); + + if ( tabindex ) { + return parseInt( tabindex, 10 ); + } + + if ( + rfocusable.test( elem.nodeName ) || + rclickable.test( elem.nodeName ) && + elem.href + ) { + return 0; + } + + return -1; + } + } + }, + + propFix: { + "for": "htmlFor", + "class": "className" + } +} ); + +// Support: IE <=11 only +// Accessing the selectedIndex property +// forces the browser to respect setting selected +// on the option +// The getter ensures a default option is selected +// when in an 
optgroup +// eslint rule "no-unused-expressions" is disabled for this code +// since it considers such accessions noop +if ( !support.optSelected ) { + jQuery.propHooks.selected = { + get: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent && parent.parentNode ) { + parent.parentNode.selectedIndex; + } + return null; + }, + set: function( elem ) { + + /* eslint no-unused-expressions: "off" */ + + var parent = elem.parentNode; + if ( parent ) { + parent.selectedIndex; + + if ( parent.parentNode ) { + parent.parentNode.selectedIndex; + } + } + } + }; +} + +jQuery.each( [ + "tabIndex", + "readOnly", + "maxLength", + "cellSpacing", + "cellPadding", + "rowSpan", + "colSpan", + "useMap", + "frameBorder", + "contentEditable" +], function() { + jQuery.propFix[ this.toLowerCase() ] = this; +} ); + + + + + // Strip and collapse whitespace according to HTML spec + // https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace + function stripAndCollapse( value ) { + var tokens = value.match( rnothtmlwhite ) || []; + return tokens.join( " " ); + } + + +function getClass( elem ) { + return elem.getAttribute && elem.getAttribute( "class" ) || ""; +} + +function classesToArray( value ) { + if ( Array.isArray( value ) ) { + return value; + } + if ( typeof value === "string" ) { + return value.match( rnothtmlwhite ) || []; + } + return []; +} + +jQuery.fn.extend( { + addClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).addClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + if ( cur.indexOf( " " + clazz + " " ) < 0 ) { + cur += clazz + " "; + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + removeClass: function( value ) { + var classes, elem, cur, curValue, clazz, j, finalValue, + i = 0; + + if ( isFunction( value ) ) { + return this.each( function( j ) { + jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) ); + } ); + } + + if ( !arguments.length ) { + return this.attr( "class", "" ); + } + + classes = classesToArray( value ); + + if ( classes.length ) { + while ( ( elem = this[ i++ ] ) ) { + curValue = getClass( elem ); + + // This expression is here for better compressibility (see addClass) + cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " ); + + if ( cur ) { + j = 0; + while ( ( clazz = classes[ j++ ] ) ) { + + // Remove *all* instances + while ( cur.indexOf( " " + clazz + " " ) > -1 ) { + cur = cur.replace( " " + clazz + " ", " " ); + } + } + + // Only assign if different to avoid unneeded rendering. + finalValue = stripAndCollapse( cur ); + if ( curValue !== finalValue ) { + elem.setAttribute( "class", finalValue ); + } + } + } + } + + return this; + }, + + toggleClass: function( value, stateVal ) { + var type = typeof value, + isValidValue = type === "string" || Array.isArray( value ); + + if ( typeof stateVal === "boolean" && isValidValue ) { + return stateVal ? 
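// The class helpers above all tokenize on whitespace via classesToArray, so
// several classes can be passed at once; toggleClass also accepts a boolean
// state to force add or remove (selector and class names illustrative):
//
//     jQuery( "nav" ).addClass( "open highlighted" );
//     jQuery( "nav" ).toggleClass( "open", isExpanded );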
this.addClass( value ) : this.removeClass( value ); + } + + if ( isFunction( value ) ) { + return this.each( function( i ) { + jQuery( this ).toggleClass( + value.call( this, i, getClass( this ), stateVal ), + stateVal + ); + } ); + } + + return this.each( function() { + var className, i, self, classNames; + + if ( isValidValue ) { + + // Toggle individual class names + i = 0; + self = jQuery( this ); + classNames = classesToArray( value ); + + while ( ( className = classNames[ i++ ] ) ) { + + // Check each className given, space separated list + if ( self.hasClass( className ) ) { + self.removeClass( className ); + } else { + self.addClass( className ); + } + } + + // Toggle whole class name + } else if ( value === undefined || type === "boolean" ) { + className = getClass( this ); + if ( className ) { + + // Store className if set + dataPriv.set( this, "__className__", className ); + } + + // If the element has a class name or if we're passed `false`, + // then remove the whole classname (if there was one, the above saved it). + // Otherwise bring back whatever was previously saved (if anything), + // falling back to the empty string if nothing was stored. + if ( this.setAttribute ) { + this.setAttribute( "class", + className || value === false ? + "" : + dataPriv.get( this, "__className__" ) || "" + ); + } + } + } ); + }, + + hasClass: function( selector ) { + var className, elem, + i = 0; + + className = " " + selector + " "; + while ( ( elem = this[ i++ ] ) ) { + if ( elem.nodeType === 1 && + ( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) { + return true; + } + } + + return false; + } +} ); + + + + +var rreturn = /\r/g; + +jQuery.fn.extend( { + val: function( value ) { + var hooks, ret, valueIsFunction, + elem = this[ 0 ]; + + if ( !arguments.length ) { + if ( elem ) { + hooks = jQuery.valHooks[ elem.type ] || + jQuery.valHooks[ elem.nodeName.toLowerCase() ]; + + if ( hooks && + "get" in hooks && + ( ret = hooks.get( elem, "value" ) ) !== undefined + ) { + return ret; + } + + ret = elem.value; + + // Handle most common string cases + if ( typeof ret === "string" ) { + return ret.replace( rreturn, "" ); + } + + // Handle cases where value is null/undef or number + return ret == null ? "" : ret; + } + + return; + } + + valueIsFunction = isFunction( value ); + + return this.each( function( i ) { + var val; + + if ( this.nodeType !== 1 ) { + return; + } + + if ( valueIsFunction ) { + val = value.call( this, i, jQuery( this ).val() ); + } else { + val = value; + } + + // Treat null/undefined as ""; convert numbers to string + if ( val == null ) { + val = ""; + + } else if ( typeof val === "number" ) { + val += ""; + + } else if ( Array.isArray( val ) ) { + val = jQuery.map( val, function( value ) { + return value == null ? "" : value + ""; + } ); + } + + hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; + + // If set returns undefined, fall back to normal setting + if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) { + this.value = val; + } + } ); + } +} ); + +jQuery.extend( { + valHooks: { + option: { + get: function( elem ) { + + var val = jQuery.find.attr( elem, "value" ); + return val != null ? 
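// .val() above defers to valHooks; the select hook just below returns a single
// value for select-one and an array for multi-selects (markup illustrative):
//
//     jQuery( "#tags" ).val();               // e.g. [ "a", "b" ] for <select multiple>
//     jQuery( "#tags" ).val( [ "a", "c" ] ); // selects the matching options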
+ val : + + // Support: IE <=10 - 11 only + // option.text throws exceptions (#14686, #14858) + // Strip and collapse whitespace + // https://html.spec.whatwg.org/#strip-and-collapse-whitespace + stripAndCollapse( jQuery.text( elem ) ); + } + }, + select: { + get: function( elem ) { + var value, option, i, + options = elem.options, + index = elem.selectedIndex, + one = elem.type === "select-one", + values = one ? null : [], + max = one ? index + 1 : options.length; + + if ( index < 0 ) { + i = max; + + } else { + i = one ? index : 0; + } + + // Loop through all the selected options + for ( ; i < max; i++ ) { + option = options[ i ]; + + // Support: IE <=9 only + // IE8-9 doesn't update selected after form reset (#2551) + if ( ( option.selected || i === index ) && + + // Don't return options that are disabled or in a disabled optgroup + !option.disabled && + ( !option.parentNode.disabled || + !nodeName( option.parentNode, "optgroup" ) ) ) { + + // Get the specific value for the option + value = jQuery( option ).val(); + + // We don't need an array for one selects + if ( one ) { + return value; + } + + // Multi-Selects return an array + values.push( value ); + } + } + + return values; + }, + + set: function( elem, value ) { + var optionSet, option, + options = elem.options, + values = jQuery.makeArray( value ), + i = options.length; + + while ( i-- ) { + option = options[ i ]; + + /* eslint-disable no-cond-assign */ + + if ( option.selected = + jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1 + ) { + optionSet = true; + } + + /* eslint-enable no-cond-assign */ + } + + // Force browsers to behave consistently when non-matching value is set + if ( !optionSet ) { + elem.selectedIndex = -1; + } + return values; + } + } + } +} ); + +// Radios and checkboxes getter/setter +jQuery.each( [ "radio", "checkbox" ], function() { + jQuery.valHooks[ this ] = { + set: function( elem, value ) { + if ( Array.isArray( value ) ) { + return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 ); + } + } + }; + if ( !support.checkOn ) { + jQuery.valHooks[ this ].get = function( elem ) { + return elem.getAttribute( "value" ) === null ? "on" : elem.value; + }; + } +} ); + + + + +// Return jQuery for attributes-only inclusion + + +support.focusin = "onfocusin" in window; + + +var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, + stopPropagationCallback = function( e ) { + e.stopPropagation(); + }; + +jQuery.extend( jQuery.event, { + + trigger: function( event, data, elem, onlyHandlers ) { + + var i, cur, tmp, bubbleType, ontype, handle, special, lastElement, + eventPath = [ elem || document ], + type = hasOwn.call( event, "type" ) ? event.type : event, + namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : []; + + cur = lastElement = tmp = elem = elem || document; + + // Don't do events on text and comment nodes + if ( elem.nodeType === 3 || elem.nodeType === 8 ) { + return; + } + + // focus/blur morphs to focusin/out; ensure we're not firing them right now + if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { + return; + } + + if ( type.indexOf( "." ) > -1 ) { + + // Namespaced trigger; create a regexp to match event type in handle() + namespaces = type.split( "." ); + type = namespaces.shift(); + namespaces.sort(); + } + ontype = type.indexOf( ":" ) < 0 && "on" + type; + + // Caller can pass in a jQuery.Event object, Object, or just an event type string + event = event[ jQuery.expando ] ? 
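// jQuery.event.trigger (being entered here) accepts a type string, a
// jQuery.Event, or a plain object carrying a `type` field; extra data is
// prepended with the event into the handler argument list (names
// illustrative):
//
//     jQuery( "#doc" ).trigger( "save.myPlugin", [ payload ] );
//     // handler signature: function( event, payload ) { ... }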
+ event : + new jQuery.Event( type, typeof event === "object" && event ); + + // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) + event.isTrigger = onlyHandlers ? 2 : 3; + event.namespace = namespaces.join( "." ); + event.rnamespace = event.namespace ? + new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) : + null; + + // Clean up the event in case it is being reused + event.result = undefined; + if ( !event.target ) { + event.target = elem; + } + + // Clone any incoming data and prepend the event, creating the handler arg list + data = data == null ? + [ event ] : + jQuery.makeArray( data, [ event ] ); + + // Allow special events to draw outside the lines + special = jQuery.event.special[ type ] || {}; + if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { + return; + } + + // Determine event propagation path in advance, per W3C events spec (#9951) + // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) + if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) { + + bubbleType = special.delegateType || type; + if ( !rfocusMorph.test( bubbleType + type ) ) { + cur = cur.parentNode; + } + for ( ; cur; cur = cur.parentNode ) { + eventPath.push( cur ); + tmp = cur; + } + + // Only add window if we got to document (e.g., not plain obj or detached DOM) + if ( tmp === ( elem.ownerDocument || document ) ) { + eventPath.push( tmp.defaultView || tmp.parentWindow || window ); + } + } + + // Fire handlers on the event path + i = 0; + while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) { + lastElement = cur; + event.type = i > 1 ? + bubbleType : + special.bindType || type; + + // jQuery handler + handle = ( dataPriv.get( cur, "events" ) || Object.create( null ) )[ event.type ] && + dataPriv.get( cur, "handle" ); + if ( handle ) { + handle.apply( cur, data ); + } + + // Native handler + handle = ontype && cur[ ontype ]; + if ( handle && handle.apply && acceptData( cur ) ) { + event.result = handle.apply( cur, data ); + if ( event.result === false ) { + event.preventDefault(); + } + } + } + event.type = type; + + // If nobody prevented the default action, do it now + if ( !onlyHandlers && !event.isDefaultPrevented() ) { + + if ( ( !special._default || + special._default.apply( eventPath.pop(), data ) === false ) && + acceptData( elem ) ) { + + // Call a native DOM method on the target with the same name as the event. 
+ // Don't do default actions on window, that's where global variables be (#6170) + if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) { + + // Don't re-trigger an onFOO event when we call its FOO() method + tmp = elem[ ontype ]; + + if ( tmp ) { + elem[ ontype ] = null; + } + + // Prevent re-triggering of the same event, since we already bubbled it above + jQuery.event.triggered = type; + + if ( event.isPropagationStopped() ) { + lastElement.addEventListener( type, stopPropagationCallback ); + } + + elem[ type ](); + + if ( event.isPropagationStopped() ) { + lastElement.removeEventListener( type, stopPropagationCallback ); + } + + jQuery.event.triggered = undefined; + + if ( tmp ) { + elem[ ontype ] = tmp; + } + } + } + } + + return event.result; + }, + + // Piggyback on a donor event to simulate a different one + // Used only for `focus(in | out)` events + simulate: function( type, elem, event ) { + var e = jQuery.extend( + new jQuery.Event(), + event, + { + type: type, + isSimulated: true + } + ); + + jQuery.event.trigger( e, null, elem ); + } + +} ); + +jQuery.fn.extend( { + + trigger: function( type, data ) { + return this.each( function() { + jQuery.event.trigger( type, data, this ); + } ); + }, + triggerHandler: function( type, data ) { + var elem = this[ 0 ]; + if ( elem ) { + return jQuery.event.trigger( type, data, elem, true ); + } + } +} ); + + +// Support: Firefox <=44 +// Firefox doesn't have focus(in | out) events +// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787 +// +// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1 +// focus(in | out) events fire after focus & blur events, +// which is spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order +// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857 +if ( !support.focusin ) { + jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler on the document while someone wants focusin/focusout + var handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + + // Handle: regular nodes (via `this.ownerDocument`), window + // (via `this.document`) & document (via `this`). + var doc = this.ownerDocument || this.document || this, + attaches = dataPriv.access( doc, fix ); + + if ( !attaches ) { + doc.addEventListener( orig, handler, true ); + } + dataPriv.access( doc, fix, ( attaches || 0 ) + 1 ); + }, + teardown: function() { + var doc = this.ownerDocument || this.document || this, + attaches = dataPriv.access( doc, fix ) - 1; + + if ( !attaches ) { + doc.removeEventListener( orig, handler, true ); + dataPriv.remove( doc, fix ); + + } else { + dataPriv.access( doc, fix, attaches ); + } + } + }; + } ); +} +var location = window.location; + +var nonce = { guid: Date.now() }; + +var rquery = ( /\?/ ); + + + +// Cross-browser xml parsing +jQuery.parseXML = function( data ) { + var xml, parserErrorElem; + if ( !data || typeof data !== "string" ) { + return null; + } + + // Support: IE 9 - 11 only + // IE throws on parseFromString with invalid input. + try { + xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" ); + } catch ( e ) {} + + parserErrorElem = xml && xml.getElementsByTagName( "parsererror" )[ 0 ]; + if ( !xml || parserErrorElem ) { + jQuery.error( "Invalid XML: " + ( + parserErrorElem ? 
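// jQuery.parseXML usage sketch; on malformed input it reaches jQuery.error
// with the parsererror text collected just below:
//
//     var doc = jQuery.parseXML( "<items><item id='1'/></items>" );
//     jQuery( doc ).find( "item" ).attr( "id" );  // => "1"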
+ jQuery.map( parserErrorElem.childNodes, function( el ) { + return el.textContent; + } ).join( "\n" ) : + data + ) ); + } + return xml; +}; + + +var + rbracket = /\[\]$/, + rCRLF = /\r?\n/g, + rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i, + rsubmittable = /^(?:input|select|textarea|keygen)/i; + +function buildParams( prefix, obj, traditional, add ) { + var name; + + if ( Array.isArray( obj ) ) { + + // Serialize array item. + jQuery.each( obj, function( i, v ) { + if ( traditional || rbracket.test( prefix ) ) { + + // Treat each array item as a scalar. + add( prefix, v ); + + } else { + + // Item is non-scalar (array or object), encode its numeric index. + buildParams( + prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]", + v, + traditional, + add + ); + } + } ); + + } else if ( !traditional && toType( obj ) === "object" ) { + + // Serialize object item. + for ( name in obj ) { + buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); + } + + } else { + + // Serialize scalar item. + add( prefix, obj ); + } +} + +// Serialize an array of form elements or a set of +// key/values into a query string +jQuery.param = function( a, traditional ) { + var prefix, + s = [], + add = function( key, valueOrFunction ) { + + // If value is a function, invoke it and use its return value + var value = isFunction( valueOrFunction ) ? + valueOrFunction() : + valueOrFunction; + + s[ s.length ] = encodeURIComponent( key ) + "=" + + encodeURIComponent( value == null ? "" : value ); + }; + + if ( a == null ) { + return ""; + } + + // If an array was passed in, assume that it is an array of form elements. + if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { + + // Serialize the form elements + jQuery.each( a, function() { + add( this.name, this.value ); + } ); + + } else { + + // If traditional, encode the "old" way (the way 1.3.2 or older + // did it), otherwise encode params recursively. + for ( prefix in a ) { + buildParams( prefix, a[ prefix ], traditional, add ); + } + } + + // Return the resulting serialization + return s.join( "&" ); +}; + +jQuery.fn.extend( { + serialize: function() { + return jQuery.param( this.serializeArray() ); + }, + serializeArray: function() { + return this.map( function() { + + // Can add propHook for "elements" to filter or add form elements + var elements = jQuery.prop( this, "elements" ); + return elements ? 
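// jQuery.param (above) serializes nested structures with bracket notation
// unless `traditional` is set; shown decoded for readability:
//
//     jQuery.param( { a: [ 1, 2 ], b: { c: 3 } } );  // a[]=1&a[]=2&b[c]=3
//     jQuery.param( { a: [ 1, 2 ] }, true );         // a=1&a=2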
jQuery.makeArray( elements ) : this; + } ).filter( function() { + var type = this.type; + + // Use .is( ":disabled" ) so that fieldset[disabled] works + return this.name && !jQuery( this ).is( ":disabled" ) && + rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) && + ( this.checked || !rcheckableType.test( type ) ); + } ).map( function( _i, elem ) { + var val = jQuery( this ).val(); + + if ( val == null ) { + return null; + } + + if ( Array.isArray( val ) ) { + return jQuery.map( val, function( val ) { + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ); + } + + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + } ).get(); + } +} ); + + +var + r20 = /%20/g, + rhash = /#.*$/, + rantiCache = /([?&])_=[^&]*/, + rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg, + + // #7653, #8125, #8152: local protocol detection + rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/, + rnoContent = /^(?:GET|HEAD)$/, + rprotocol = /^\/\//, + + /* Prefilters + * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) + * 2) These are called: + * - BEFORE asking for a transport + * - AFTER param serialization (s.data is a string if s.processData is true) + * 3) key is the dataType + * 4) the catchall symbol "*" can be used + * 5) execution will start with transport dataType and THEN continue down to "*" if needed + */ + prefilters = {}, + + /* Transports bindings + * 1) key is the dataType + * 2) the catchall symbol "*" can be used + * 3) selection will start with transport dataType and THEN go to "*" if needed + */ + transports = {}, + + // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression + allTypes = "*/".concat( "*" ), + + // Anchor tag for parsing the document origin + originAnchor = document.createElement( "a" ); + +originAnchor.href = location.href; + +// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport +function addToPrefiltersOrTransports( structure ) { + + // dataTypeExpression is optional and defaults to "*" + return function( dataTypeExpression, func ) { + + if ( typeof dataTypeExpression !== "string" ) { + func = dataTypeExpression; + dataTypeExpression = "*"; + } + + var dataType, + i = 0, + dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || []; + + if ( isFunction( func ) ) { + + // For each dataType in the dataTypeExpression + while ( ( dataType = dataTypes[ i++ ] ) ) { + + // Prepend if requested + if ( dataType[ 0 ] === "+" ) { + dataType = dataType.slice( 1 ) || "*"; + ( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func ); + + // Otherwise append + } else { + ( structure[ dataType ] = structure[ dataType ] || [] ).push( func ); + } + } + } + }; +} + +// Base inspection function for prefilters and transports +function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) { + + var inspected = {}, + seekingTransport = ( structure === transports ); + + function inspect( dataType ) { + var selected; + inspected[ dataType ] = true; + jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) { + var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR ); + if ( typeof dataTypeOrTransport === "string" && + !seekingTransport && !inspected[ dataTypeOrTransport ] ) { + + options.dataTypes.unshift( dataTypeOrTransport ); + inspect( dataTypeOrTransport ); + return false; + } else if ( seekingTransport ) { + return !( selected = dataTypeOrTransport ); + } + } 
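// addToPrefiltersOrTransports (above) backs both jQuery.ajaxPrefilter and
// jQuery.ajaxTransport; a "+" prefix on the dataType prepends the handler
// instead of appending it. Registration sketch (the handler body is
// hypothetical):
//
//     jQuery.ajaxPrefilter( "+json", function( options, originalOptions, jqXHR ) {
//         // runs before json prefilters registered earlier
//     } );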
); + return selected; + } + + return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" ); +} + +// A special extend for ajax options +// that takes "flat" options (not to be deep extended) +// Fixes #9887 +function ajaxExtend( target, src ) { + var key, deep, + flatOptions = jQuery.ajaxSettings.flatOptions || {}; + + for ( key in src ) { + if ( src[ key ] !== undefined ) { + ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; + } + } + if ( deep ) { + jQuery.extend( true, target, deep ); + } + + return target; +} + +/* Handles responses to an ajax request: + * - finds the right dataType (mediates between content-type and expected dataType) + * - returns the corresponding response + */ +function ajaxHandleResponses( s, jqXHR, responses ) { + + var ct, type, finalDataType, firstDataType, + contents = s.contents, + dataTypes = s.dataTypes; + + // Remove auto dataType and get content-type in the process + while ( dataTypes[ 0 ] === "*" ) { + dataTypes.shift(); + if ( ct === undefined ) { + ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" ); + } + } + + // Check if we're dealing with a known content-type + if ( ct ) { + for ( type in contents ) { + if ( contents[ type ] && contents[ type ].test( ct ) ) { + dataTypes.unshift( type ); + break; + } + } + } + + // Check to see if we have a response for the expected dataType + if ( dataTypes[ 0 ] in responses ) { + finalDataType = dataTypes[ 0 ]; + } else { + + // Try convertible dataTypes + for ( type in responses ) { + if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) { + finalDataType = type; + break; + } + if ( !firstDataType ) { + firstDataType = type; + } + } + + // Or just use first one + finalDataType = finalDataType || firstDataType; + } + + // If we found a dataType + // We add the dataType to the list if needed + // and return the corresponding response + if ( finalDataType ) { + if ( finalDataType !== dataTypes[ 0 ] ) { + dataTypes.unshift( finalDataType ); + } + return responses[ finalDataType ]; + } +} + +/* Chain conversions given the request and the original response + * Also sets the responseXXX fields on the jqXHR instance + */ +function ajaxConvert( s, response, jqXHR, isSuccess ) { + var conv2, current, conv, tmp, prev, + converters = {}, + + // Work with a copy of dataTypes in case we need to modify it for conversion + dataTypes = s.dataTypes.slice(); + + // Create converters map with lowercased keys + if ( dataTypes[ 1 ] ) { + for ( conv in s.converters ) { + converters[ conv.toLowerCase() ] = s.converters[ conv ]; + } + } + + current = dataTypes.shift(); + + // Convert to each sequential dataType + while ( current ) { + + if ( s.responseFields[ current ] ) { + jqXHR[ s.responseFields[ current ] ] = response; + } + + // Apply the dataFilter if provided + if ( !prev && isSuccess && s.dataFilter ) { + response = s.dataFilter( response, s.dataType ); + } + + prev = current; + current = dataTypes.shift(); + + if ( current ) { + + // There's only work to do if current dataType is non-auto + if ( current === "*" ) { + + current = prev; + + // Convert response if prev dataType is non-auto and differs from current + } else if ( prev !== "*" && prev !== current ) { + + // Seek a direct converter + conv = converters[ prev + " " + current ] || converters[ "* " + current ]; + + // If none found, seek a pair + if ( !conv ) { + for ( conv2 in converters ) { + + // If conv2 outputs current + tmp = conv2.split( " " ); + if ( tmp[ 1 ] === current ) { + + // If prev 
can be converted to accepted input + conv = converters[ prev + " " + tmp[ 0 ] ] || + converters[ "* " + tmp[ 0 ] ]; + if ( conv ) { + + // Condense equivalence converters + if ( conv === true ) { + conv = converters[ conv2 ]; + + // Otherwise, insert the intermediate dataType + } else if ( converters[ conv2 ] !== true ) { + current = tmp[ 0 ]; + dataTypes.unshift( tmp[ 1 ] ); + } + break; + } + } + } + } + + // Apply converter (if not an equivalence) + if ( conv !== true ) { + + // Unless errors are allowed to bubble, catch and return them + if ( conv && s.throws ) { + response = conv( response ); + } else { + try { + response = conv( response ); + } catch ( e ) { + return { + state: "parsererror", + error: conv ? e : "No conversion from " + prev + " to " + current + }; + } + } + } + } + } + } + + return { state: "success", data: response }; +} + +jQuery.extend( { + + // Counter for holding the number of active queries + active: 0, + + // Last-Modified header cache for next request + lastModified: {}, + etag: {}, + + ajaxSettings: { + url: location.href, + type: "GET", + isLocal: rlocalProtocol.test( location.protocol ), + global: true, + processData: true, + async: true, + contentType: "application/x-www-form-urlencoded; charset=UTF-8", + + /* + timeout: 0, + data: null, + dataType: null, + username: null, + password: null, + cache: null, + throws: false, + traditional: false, + headers: {}, + */ + + accepts: { + "*": allTypes, + text: "text/plain", + html: "text/html", + xml: "application/xml, text/xml", + json: "application/json, text/javascript" + }, + + contents: { + xml: /\bxml\b/, + html: /\bhtml/, + json: /\bjson\b/ + }, + + responseFields: { + xml: "responseXML", + text: "responseText", + json: "responseJSON" + }, + + // Data converters + // Keys separate source (or catchall "*") and destination types with a single space + converters: { + + // Convert anything to text + "* text": String, + + // Text to html (true = no transformation) + "text html": true, + + // Evaluate text as a json expression + "text json": JSON.parse, + + // Parse text as xml + "text xml": jQuery.parseXML + }, + + // For options that shouldn't be deep extended: + // you can add your own custom options here if + // and when you create one that shouldn't be + // deep extended (see ajaxExtend) + flatOptions: { + url: true, + context: true + } + }, + + // Creates a full fledged settings object into target + // with both ajaxSettings and settings fields. + // If target is omitted, writes into ajaxSettings. + ajaxSetup: function( target, settings ) { + return settings ? 
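// ajaxSetup (entered here) deep-extends everything except the flatOptions
// (url, context). A sketch registering a converter for a custom dataType
// ("text mycsv" and parseCsv are hypothetical):
//
//     jQuery.ajaxSetup( {
//         converters: { "text mycsv": parseCsv }
//     } );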
+ + // Building a settings object + ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) : + + // Extending ajaxSettings + ajaxExtend( jQuery.ajaxSettings, target ); + }, + + ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), + ajaxTransport: addToPrefiltersOrTransports( transports ), + + // Main method + ajax: function( url, options ) { + + // If url is an object, simulate pre-1.5 signature + if ( typeof url === "object" ) { + options = url; + url = undefined; + } + + // Force options to be an object + options = options || {}; + + var transport, + + // URL without anti-cache param + cacheURL, + + // Response headers + responseHeadersString, + responseHeaders, + + // timeout handle + timeoutTimer, + + // Url cleanup var + urlAnchor, + + // Request state (becomes false upon send and true upon completion) + completed, + + // To know if global events are to be dispatched + fireGlobals, + + // Loop variable + i, + + // uncached part of the url + uncached, + + // Create the final options object + s = jQuery.ajaxSetup( {}, options ), + + // Callbacks context + callbackContext = s.context || s, + + // Context for global events is callbackContext if it is a DOM node or jQuery collection + globalEventContext = s.context && + ( callbackContext.nodeType || callbackContext.jquery ) ? + jQuery( callbackContext ) : + jQuery.event, + + // Deferreds + deferred = jQuery.Deferred(), + completeDeferred = jQuery.Callbacks( "once memory" ), + + // Status-dependent callbacks + statusCode = s.statusCode || {}, + + // Headers (they are sent all at once) + requestHeaders = {}, + requestHeadersNames = {}, + + // Default abort message + strAbort = "canceled", + + // Fake xhr + jqXHR = { + readyState: 0, + + // Builds headers hashtable if needed + getResponseHeader: function( key ) { + var match; + if ( completed ) { + if ( !responseHeaders ) { + responseHeaders = {}; + while ( ( match = rheaders.exec( responseHeadersString ) ) ) { + responseHeaders[ match[ 1 ].toLowerCase() + " " ] = + ( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] ) + .concat( match[ 2 ] ); + } + } + match = responseHeaders[ key.toLowerCase() + " " ]; + } + return match == null ? null : match.join( ", " ); + }, + + // Raw string + getAllResponseHeaders: function() { + return completed ? 
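// The jqXHR.statusCode method above lazily merges per-status callbacks, and
// fires them immediately if the request already completed. Usage sketch
// (URL and handler illustrative):
//
//     jQuery.ajax( "/api/item/7", {
//         statusCode: { 404: function() { console.log( "missing" ); } }
//     } );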
responseHeadersString : null; + }, + + // Caches the header + setRequestHeader: function( name, value ) { + if ( completed == null ) { + name = requestHeadersNames[ name.toLowerCase() ] = + requestHeadersNames[ name.toLowerCase() ] || name; + requestHeaders[ name ] = value; + } + return this; + }, + + // Overrides response content-type header + overrideMimeType: function( type ) { + if ( completed == null ) { + s.mimeType = type; + } + return this; + }, + + // Status-dependent callbacks + statusCode: function( map ) { + var code; + if ( map ) { + if ( completed ) { + + // Execute the appropriate callbacks + jqXHR.always( map[ jqXHR.status ] ); + } else { + + // Lazy-add the new callbacks in a way that preserves old ones + for ( code in map ) { + statusCode[ code ] = [ statusCode[ code ], map[ code ] ]; + } + } + } + return this; + }, + + // Cancel the request + abort: function( statusText ) { + var finalText = statusText || strAbort; + if ( transport ) { + transport.abort( finalText ); + } + done( 0, finalText ); + return this; + } + }; + + // Attach deferreds + deferred.promise( jqXHR ); + + // Add protocol if not provided (prefilters might expect it) + // Handle falsy url in the settings object (#10093: consistency with old signature) + // We also use the url parameter if available + s.url = ( ( url || s.url || location.href ) + "" ) + .replace( rprotocol, location.protocol + "//" ); + + // Alias method option to type as per ticket #12004 + s.type = options.method || options.type || s.method || s.type; + + // Extract dataTypes list + s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ]; + + // A cross-domain request is in order when the origin doesn't match the current origin. + if ( s.crossDomain == null ) { + urlAnchor = document.createElement( "a" ); + + // Support: IE <=8 - 11, Edge 12 - 15 + // IE throws exception on accessing the href property if url is malformed, + // e.g. 
http://example.com:80x/ + try { + urlAnchor.href = s.url; + + // Support: IE <=8 - 11 only + // Anchor's host property isn't correctly set when s.url is relative + urlAnchor.href = urlAnchor.href; + s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !== + urlAnchor.protocol + "//" + urlAnchor.host; + } catch ( e ) { + + // If there is an error parsing the URL, assume it is crossDomain, + // it can be rejected by the transport if it is invalid + s.crossDomain = true; + } + } + + // Convert data if not already a string + if ( s.data && s.processData && typeof s.data !== "string" ) { + s.data = jQuery.param( s.data, s.traditional ); + } + + // Apply prefilters + inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); + + // If request was aborted inside a prefilter, stop there + if ( completed ) { + return jqXHR; + } + + // We can fire global events as of now if asked to + // Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118) + fireGlobals = jQuery.event && s.global; + + // Watch for a new set of requests + if ( fireGlobals && jQuery.active++ === 0 ) { + jQuery.event.trigger( "ajaxStart" ); + } + + // Uppercase the type + s.type = s.type.toUpperCase(); + + // Determine if request has content + s.hasContent = !rnoContent.test( s.type ); + + // Save the URL in case we're toying with the If-Modified-Since + // and/or If-None-Match header later on + // Remove hash to simplify url manipulation + cacheURL = s.url.replace( rhash, "" ); + + // More options handling for requests with no content + if ( !s.hasContent ) { + + // Remember the hash so we can put it back + uncached = s.url.slice( cacheURL.length ); + + // If data is available and should be processed, append data to url + if ( s.data && ( s.processData || typeof s.data === "string" ) ) { + cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data; + + // #9682: remove data so that it's not used in an eventual retry + delete s.data; + } + + // Add or update anti-cache param if needed + if ( s.cache === false ) { + cacheURL = cacheURL.replace( rantiCache, "$1" ); + uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce.guid++ ) + + uncached; + } + + // Put hash and anti-cache on the URL that will be requested (gh-1732) + s.url = cacheURL + uncached; + + // Change '%20' to '+' if this is encoded form body content (gh-2658) + } else if ( s.data && s.processData && + ( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) { + s.data = s.data.replace( r20, "+" ); + } + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. + if ( s.ifModified ) { + if ( jQuery.lastModified[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] ); + } + if ( jQuery.etag[ cacheURL ] ) { + jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] ); + } + } + + // Set the correct header, if data is being sent + if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { + jqXHR.setRequestHeader( "Content-Type", s.contentType ); + } + + // Set the Accepts header for the server, depending on the dataType + jqXHR.setRequestHeader( + "Accept", + s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ? + s.accepts[ s.dataTypes[ 0 ] ] + + ( s.dataTypes[ 0 ] !== "*" ? 
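// With cache: false, any existing "_=" param is stripped above and a fresh
// "_=" + nonce.guid++ is appended (nonce starts at Date.now(), defined
// earlier), so repeated GETs bypass the browser cache (URL and timestamp
// illustrative):
//
//     jQuery.ajax( { url: "/api/list?x=1", cache: false } );
//     // requested as /api/list?x=1&_=1700000000000, guid increments per request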
", " + allTypes + "; q=0.01" : "" ) : + s.accepts[ "*" ] + ); + + // Check for headers option + for ( i in s.headers ) { + jqXHR.setRequestHeader( i, s.headers[ i ] ); + } + + // Allow custom headers/mimetypes and early abort + if ( s.beforeSend && + ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) { + + // Abort if not done already and return + return jqXHR.abort(); + } + + // Aborting is no longer a cancellation + strAbort = "abort"; + + // Install callbacks on deferreds + completeDeferred.add( s.complete ); + jqXHR.done( s.success ); + jqXHR.fail( s.error ); + + // Get transport + transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); + + // If no transport, we auto-abort + if ( !transport ) { + done( -1, "No Transport" ); + } else { + jqXHR.readyState = 1; + + // Send global event + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); + } + + // If request was aborted inside ajaxSend, stop there + if ( completed ) { + return jqXHR; + } + + // Timeout + if ( s.async && s.timeout > 0 ) { + timeoutTimer = window.setTimeout( function() { + jqXHR.abort( "timeout" ); + }, s.timeout ); + } + + try { + completed = false; + transport.send( requestHeaders, done ); + } catch ( e ) { + + // Rethrow post-completion exceptions + if ( completed ) { + throw e; + } + + // Propagate others as results + done( -1, e ); + } + } + + // Callback for when everything is done + function done( status, nativeStatusText, responses, headers ) { + var isSuccess, success, error, response, modified, + statusText = nativeStatusText; + + // Ignore repeat invocations + if ( completed ) { + return; + } + + completed = true; + + // Clear timeout if it exists + if ( timeoutTimer ) { + window.clearTimeout( timeoutTimer ); + } + + // Dereference transport for early garbage collection + // (no matter how long the jqXHR object will be used) + transport = undefined; + + // Cache response headers + responseHeadersString = headers || ""; + + // Set readyState + jqXHR.readyState = status > 0 ? 4 : 0; + + // Determine if successful + isSuccess = status >= 200 && status < 300 || status === 304; + + // Get response data + if ( responses ) { + response = ajaxHandleResponses( s, jqXHR, responses ); + } + + // Use a noop converter for missing script but not if jsonp + if ( !isSuccess && + jQuery.inArray( "script", s.dataTypes ) > -1 && + jQuery.inArray( "json", s.dataTypes ) < 0 ) { + s.converters[ "text script" ] = function() {}; + } + + // Convert no matter what (that way responseXXX fields are always set) + response = ajaxConvert( s, response, jqXHR, isSuccess ); + + // If successful, handle type chaining + if ( isSuccess ) { + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
+ if ( s.ifModified ) { + modified = jqXHR.getResponseHeader( "Last-Modified" ); + if ( modified ) { + jQuery.lastModified[ cacheURL ] = modified; + } + modified = jqXHR.getResponseHeader( "etag" ); + if ( modified ) { + jQuery.etag[ cacheURL ] = modified; + } + } + + // if no content + if ( status === 204 || s.type === "HEAD" ) { + statusText = "nocontent"; + + // if not modified + } else if ( status === 304 ) { + statusText = "notmodified"; + + // If we have data, let's convert it + } else { + statusText = response.state; + success = response.data; + error = response.error; + isSuccess = !error; + } + } else { + + // Extract error from statusText and normalize for non-aborts + error = statusText; + if ( status || !statusText ) { + statusText = "error"; + if ( status < 0 ) { + status = 0; + } + } + } + + // Set data for the fake xhr object + jqXHR.status = status; + jqXHR.statusText = ( nativeStatusText || statusText ) + ""; + + // Success/Error + if ( isSuccess ) { + deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); + } else { + deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); + } + + // Status-dependent callbacks + jqXHR.statusCode( statusCode ); + statusCode = undefined; + + if ( fireGlobals ) { + globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError", + [ jqXHR, s, isSuccess ? success : error ] ); + } + + // Complete + completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); + + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); + + // Handle the global AJAX counter + if ( !( --jQuery.active ) ) { + jQuery.event.trigger( "ajaxStop" ); + } + } + } + + return jqXHR; + }, + + getJSON: function( url, data, callback ) { + return jQuery.get( url, data, callback, "json" ); + }, + + getScript: function( url, callback ) { + return jQuery.get( url, undefined, callback, "script" ); + } +} ); + +jQuery.each( [ "get", "post" ], function( _i, method ) { + jQuery[ method ] = function( url, data, callback, type ) { + + // Shift arguments if data argument was omitted + if ( isFunction( data ) ) { + type = type || callback; + callback = data; + data = undefined; + } + + // The url can be an options object (which then must have .url) + return jQuery.ajax( jQuery.extend( { + url: url, + type: method, + dataType: type, + data: data, + success: callback + }, jQuery.isPlainObject( url ) && url ) ); + }; +} ); + +jQuery.ajaxPrefilter( function( s ) { + var i; + for ( i in s.headers ) { + if ( i.toLowerCase() === "content-type" ) { + s.contentType = s.headers[ i ] || ""; + } + } +} ); + + +jQuery._evalUrl = function( url, options, doc ) { + return jQuery.ajax( { + url: url, + + // Make this explicit, since user can override this through ajaxSetup (#11264) + type: "GET", + dataType: "script", + cache: true, + async: false, + global: false, + + // Only evaluate the response if it is successful (gh-4126) + // dataFilter is not invoked for failure responses, so using it instead + // of the default converter is kludgy but it works. 
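// jQuery._evalUrl is, to the best of this annotation's knowledge, what
// jQuery's DOM-manipulation code uses for external <script src> nodes; note
// it runs synchronously (async: false) and, per the comment above, evaluates
// via dataFilter only on success. The public analogue (path illustrative):
//
//     jQuery.getScript( "/js/plugin.js", function() { /* loaded */ } );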
+ converters: { + "text script": function() {} + }, + dataFilter: function( response ) { + jQuery.globalEval( response, options, doc ); + } + } ); +}; + + +jQuery.fn.extend( { + wrapAll: function( html ) { + var wrap; + + if ( this[ 0 ] ) { + if ( isFunction( html ) ) { + html = html.call( this[ 0 ] ); + } + + // The elements to wrap the target around + wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true ); + + if ( this[ 0 ].parentNode ) { + wrap.insertBefore( this[ 0 ] ); + } + + wrap.map( function() { + var elem = this; + + while ( elem.firstElementChild ) { + elem = elem.firstElementChild; + } + + return elem; + } ).append( this ); + } + + return this; + }, + + wrapInner: function( html ) { + if ( isFunction( html ) ) { + return this.each( function( i ) { + jQuery( this ).wrapInner( html.call( this, i ) ); + } ); + } + + return this.each( function() { + var self = jQuery( this ), + contents = self.contents(); + + if ( contents.length ) { + contents.wrapAll( html ); + + } else { + self.append( html ); + } + } ); + }, + + wrap: function( html ) { + var htmlIsFunction = isFunction( html ); + + return this.each( function( i ) { + jQuery( this ).wrapAll( htmlIsFunction ? html.call( this, i ) : html ); + } ); + }, + + unwrap: function( selector ) { + this.parent( selector ).not( "body" ).each( function() { + jQuery( this ).replaceWith( this.childNodes ); + } ); + return this; + } +} ); + + +jQuery.expr.pseudos.hidden = function( elem ) { + return !jQuery.expr.pseudos.visible( elem ); +}; +jQuery.expr.pseudos.visible = function( elem ) { + return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length ); +}; + + + + +jQuery.ajaxSettings.xhr = function() { + try { + return new window.XMLHttpRequest(); + } catch ( e ) {} +}; + +var xhrSuccessStatus = { + + // File protocol always yields status code 0, assume 200 + 0: 200, + + // Support: IE <=9 only + // #1450: sometimes IE returns 1223 when it should be 204 + 1223: 204 + }, + xhrSupported = jQuery.ajaxSettings.xhr(); + +support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported ); +support.ajax = xhrSupported = !!xhrSupported; + +jQuery.ajaxTransport( function( options ) { + var callback, errorCallback; + + // Cross domain only allowed if supported through XMLHttpRequest + if ( support.cors || xhrSupported && !options.crossDomain ) { + return { + send: function( headers, complete ) { + var i, + xhr = options.xhr(); + + xhr.open( + options.type, + options.url, + options.async, + options.username, + options.password + ); + + // Apply custom fields if provided + if ( options.xhrFields ) { + for ( i in options.xhrFields ) { + xhr[ i ] = options.xhrFields[ i ]; + } + } + + // Override mime type if needed + if ( options.mimeType && xhr.overrideMimeType ) { + xhr.overrideMimeType( options.mimeType ); + } + + // X-Requested-With header + // For cross-domain requests, seeing as conditions for a preflight are + // akin to a jigsaw puzzle, we simply never set it to be sure. + // (it can always be set on a per-request basis or even using ajaxSetup) + // For same-domain requests, won't change header if already provided. 
+ if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) { + headers[ "X-Requested-With" ] = "XMLHttpRequest"; + } + + // Set headers + for ( i in headers ) { + xhr.setRequestHeader( i, headers[ i ] ); + } + + // Callback + callback = function( type ) { + return function() { + if ( callback ) { + callback = errorCallback = xhr.onload = + xhr.onerror = xhr.onabort = xhr.ontimeout = + xhr.onreadystatechange = null; + + if ( type === "abort" ) { + xhr.abort(); + } else if ( type === "error" ) { + + // Support: IE <=9 only + // On a manual native abort, IE9 throws + // errors on any property access that is not readyState + if ( typeof xhr.status !== "number" ) { + complete( 0, "error" ); + } else { + complete( + + // File: protocol always yields status 0; see #8605, #14207 + xhr.status, + xhr.statusText + ); + } + } else { + complete( + xhrSuccessStatus[ xhr.status ] || xhr.status, + xhr.statusText, + + // Support: IE <=9 only + // IE9 has no XHR2 but throws on binary (trac-11426) + // For XHR2 non-text, let the caller handle it (gh-2498) + ( xhr.responseType || "text" ) !== "text" || + typeof xhr.responseText !== "string" ? + { binary: xhr.response } : + { text: xhr.responseText }, + xhr.getAllResponseHeaders() + ); + } + } + }; + }; + + // Listen to events + xhr.onload = callback(); + errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" ); + + // Support: IE 9 only + // Use onreadystatechange to replace onabort + // to handle uncaught aborts + if ( xhr.onabort !== undefined ) { + xhr.onabort = errorCallback; + } else { + xhr.onreadystatechange = function() { + + // Check readyState before timeout as it changes + if ( xhr.readyState === 4 ) { + + // Allow onerror to be called first, + // but that will not handle a native abort + // Also, save errorCallback to a variable + // as xhr.onerror cannot be accessed + window.setTimeout( function() { + if ( callback ) { + errorCallback(); + } + } ); + } + }; + } + + // Create the abort callback + callback = callback( "abort" ); + + try { + + // Do send the request (this may raise an exception) + xhr.send( options.hasContent && options.data || null ); + } catch ( e ) { + + // #14683: Only rethrow if this hasn't been notified as an error yet + if ( callback ) { + throw e; + } + } + }, + + abort: function() { + if ( callback ) { + callback(); + } + } + }; + } +} ); + + + + +// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432) +jQuery.ajaxPrefilter( function( s ) { + if ( s.crossDomain ) { + s.contents.script = false; + } +} ); + +// Install script dataType +jQuery.ajaxSetup( { + accepts: { + script: "text/javascript, application/javascript, " + + "application/ecmascript, application/x-ecmascript" + }, + contents: { + script: /\b(?:java|ecma)script\b/ + }, + converters: { + "text script": function( text ) { + jQuery.globalEval( text ); + return text; + } + } +} ); + +// Handle cache's special case and crossDomain +jQuery.ajaxPrefilter( "script", function( s ) { + if ( s.cache === undefined ) { + s.cache = false; + } + if ( s.crossDomain ) { + s.type = "GET"; + } +} ); + +// Bind script tag hack transport +jQuery.ajaxTransport( "script", function( s ) { + + // This transport only deals with cross domain or forced-by-attrs requests + if ( s.crossDomain || s.scriptAttrs ) { + var script, callback; + return { + send: function( _, complete ) { + script = jQuery( "<script>" ) + .attr( s.scriptAttrs || {} ) + .prop( { charset: s.scriptCharset, src: s.url } ) + .on( "load error", callback = function( evt ) { + script.remove(); + callback = null; + if ( evt ) { + complete( evt.type === "error" ? 404 : 200, evt.type ); + } + } ); + + // Use native DOM manipulation to avoid our domManip AJAX trickery + document.head.appendChild( script[ 0 ] ); + }, + + abort: function() { + if ( callback ) { + callback(); + } + } + }; + } +} ); diff --git a/_static/scripts/bootstrap.js b/_static/scripts/bootstrap.js new file mode 100644 index 0000000000..bda8a60272 --- 
/dev/null +++ b/_static/scripts/bootstrap.js @@ -0,0 +1,3 @@ +/*! For license information please see bootstrap.js.LICENSE.txt */ +(()=>{"use strict";var t={d:(e,i)=>{for(var n in i)t.o(i,n)&&!t.o(e,n)&&Object.defineProperty(e,n,{enumerable:!0,get:i[n]})},o:(t,e)=>Object.prototype.hasOwnProperty.call(t,e),r:t=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})}},e={};t.r(e),t.d(e,{afterMain:()=>w,afterRead:()=>b,afterWrite:()=>T,applyStyles:()=>D,arrow:()=>G,auto:()=>r,basePlacements:()=>a,beforeMain:()=>v,beforeRead:()=>g,beforeWrite:()=>E,bottom:()=>n,clippingParents:()=>h,computeStyles:()=>et,createPopper:()=>St,createPopperBase:()=>Lt,createPopperLite:()=>Dt,detectOverflow:()=>gt,end:()=>c,eventListeners:()=>nt,flip:()=>_t,hide:()=>yt,left:()=>o,main:()=>y,modifierPhases:()=>C,offset:()=>wt,placements:()=>m,popper:()=>u,popperGenerator:()=>kt,popperOffsets:()=>Et,preventOverflow:()=>At,read:()=>_,reference:()=>f,right:()=>s,start:()=>l,top:()=>i,variationPlacements:()=>p,viewport:()=>d,write:()=>A});var i="top",n="bottom",s="right",o="left",r="auto",a=[i,n,s,o],l="start",c="end",h="clippingParents",d="viewport",u="popper",f="reference",p=a.reduce((function(t,e){return t.concat([e+"-"+l,e+"-"+c])}),[]),m=[].concat(a,[r]).reduce((function(t,e){return t.concat([e,e+"-"+l,e+"-"+c])}),[]),g="beforeRead",_="read",b="afterRead",v="beforeMain",y="main",w="afterMain",E="beforeWrite",A="write",T="afterWrite",C=[g,_,b,v,y,w,E,A,T];function O(t){return t?(t.nodeName||"").toLowerCase():null}function x(t){if(null==t)return window;if("[object Window]"!==t.toString()){var e=t.ownerDocument;return e&&e.defaultView||window}return t}function k(t){return t instanceof x(t).Element||t instanceof Element}function L(t){return t instanceof x(t).HTMLElement||t instanceof HTMLElement}function S(t){return"undefined"!=typeof ShadowRoot&&(t instanceof x(t).ShadowRoot||t instanceof ShadowRoot)}const D={name:"applyStyles",enabled:!0,phase:"write",fn:function(t){var e=t.state;Object.keys(e.elements).forEach((function(t){var i=e.styles[t]||{},n=e.attributes[t]||{},s=e.elements[t];L(s)&&O(s)&&(Object.assign(s.style,i),Object.keys(n).forEach((function(t){var e=n[t];!1===e?s.removeAttribute(t):s.setAttribute(t,!0===e?"":e)})))}))},effect:function(t){var e=t.state,i={popper:{position:e.options.strategy,left:"0",top:"0",margin:"0"},arrow:{position:"absolute"},reference:{}};return Object.assign(e.elements.popper.style,i.popper),e.styles=i,e.elements.arrow&&Object.assign(e.elements.arrow.style,i.arrow),function(){Object.keys(e.elements).forEach((function(t){var n=e.elements[t],s=e.attributes[t]||{},o=Object.keys(e.styles.hasOwnProperty(t)?e.styles[t]:i[t]).reduce((function(t,e){return t[e]="",t}),{});L(n)&&O(n)&&(Object.assign(n.style,o),Object.keys(s).forEach((function(t){n.removeAttribute(t)})))}))}},requires:["computeStyles"]};function $(t){return t.split("-")[0]}var I=Math.max,N=Math.min,P=Math.round;function M(){var t=navigator.userAgentData;return null!=t&&t.brands&&Array.isArray(t.brands)?t.brands.map((function(t){return t.brand+"/"+t.version})).join(" "):navigator.userAgent}function j(){return!/^((?!chrome|android).)*safari/i.test(M())}function F(t,e,i){void 0===e&&(e=!1),void 0===i&&(i=!1);var n=t.getBoundingClientRect(),s=1,o=1;e&&L(t)&&(s=t.offsetWidth>0&&P(n.width)/t.offsetWidth||1,o=t.offsetHeight>0&&P(n.height)/t.offsetHeight||1);var 
r=(k(t)?x(t):window).visualViewport,a=!j()&&i,l=(n.left+(a&&r?r.offsetLeft:0))/s,c=(n.top+(a&&r?r.offsetTop:0))/o,h=n.width/s,d=n.height/o;return{width:h,height:d,top:c,right:l+h,bottom:c+d,left:l,x:l,y:c}}function H(t){var e=F(t),i=t.offsetWidth,n=t.offsetHeight;return Math.abs(e.width-i)<=1&&(i=e.width),Math.abs(e.height-n)<=1&&(n=e.height),{x:t.offsetLeft,y:t.offsetTop,width:i,height:n}}function B(t,e){var i=e.getRootNode&&e.getRootNode();if(t.contains(e))return!0;if(i&&S(i)){var n=e;do{if(n&&t.isSameNode(n))return!0;n=n.parentNode||n.host}while(n)}return!1}function W(t){return x(t).getComputedStyle(t)}function z(t){return["table","td","th"].indexOf(O(t))>=0}function R(t){return((k(t)?t.ownerDocument:t.document)||window.document).documentElement}function q(t){return"html"===O(t)?t:t.assignedSlot||t.parentNode||(S(t)?t.host:null)||R(t)}function V(t){return L(t)&&"fixed"!==W(t).position?t.offsetParent:null}function Y(t){for(var e=x(t),i=V(t);i&&z(i)&&"static"===W(i).position;)i=V(i);return i&&("html"===O(i)||"body"===O(i)&&"static"===W(i).position)?e:i||function(t){var e=/firefox/i.test(M());if(/Trident/i.test(M())&&L(t)&&"fixed"===W(t).position)return null;var i=q(t);for(S(i)&&(i=i.host);L(i)&&["html","body"].indexOf(O(i))<0;){var n=W(i);if("none"!==n.transform||"none"!==n.perspective||"paint"===n.contain||-1!==["transform","perspective"].indexOf(n.willChange)||e&&"filter"===n.willChange||e&&n.filter&&"none"!==n.filter)return i;i=i.parentNode}return null}(t)||e}function K(t){return["top","bottom"].indexOf(t)>=0?"x":"y"}function Q(t,e,i){return I(t,N(e,i))}function X(t){return Object.assign({},{top:0,right:0,bottom:0,left:0},t)}function U(t,e){return e.reduce((function(e,i){return e[i]=t,e}),{})}const G={name:"arrow",enabled:!0,phase:"main",fn:function(t){var e,r=t.state,l=t.name,c=t.options,h=r.elements.arrow,d=r.modifiersData.popperOffsets,u=$(r.placement),f=K(u),p=[o,s].indexOf(u)>=0?"height":"width";if(h&&d){var m=function(t,e){return X("number"!=typeof(t="function"==typeof t?t(Object.assign({},e.rects,{placement:e.placement})):t)?t:U(t,a))}(c.padding,r),g=H(h),_="y"===f?i:o,b="y"===f?n:s,v=r.rects.reference[p]+r.rects.reference[f]-d[f]-r.rects.popper[p],y=d[f]-r.rects.reference[f],w=Y(h),E=w?"y"===f?w.clientHeight||0:w.clientWidth||0:0,A=v/2-y/2,T=m[_],C=E-g[p]-m[b],O=E/2-g[p]/2+A,x=Q(T,O,C),k=f;r.modifiersData[l]=((e={})[k]=x,e.centerOffset=x-O,e)}},effect:function(t){var e=t.state,i=t.options.element,n=void 0===i?"[data-popper-arrow]":i;null!=n&&("string"!=typeof n||(n=e.elements.popper.querySelector(n)))&&B(e.elements.popper,n)&&(e.elements.arrow=n)},requires:["popperOffsets"],requiresIfExists:["preventOverflow"]};function J(t){return t.split("-")[1]}var Z={top:"auto",right:"auto",bottom:"auto",left:"auto"};function tt(t){var e,r=t.popper,a=t.popperRect,l=t.placement,h=t.variation,d=t.offsets,u=t.position,f=t.gpuAcceleration,p=t.adaptive,m=t.roundOffsets,g=t.isFixed,_=d.x,b=void 0===_?0:_,v=d.y,y=void 0===v?0:v,w="function"==typeof m?m({x:b,y}):{x:b,y};b=w.x,y=w.y;var E=d.hasOwnProperty("x"),A=d.hasOwnProperty("y"),T=o,C=i,O=window;if(p){var k=Y(r),L="clientHeight",S="clientWidth";k===x(r)&&"static"!==W(k=R(r)).position&&"absolute"===u&&(L="scrollHeight",S="scrollWidth"),(l===i||(l===o||l===s)&&h===c)&&(C=n,y-=(g&&k===O&&O.visualViewport?O.visualViewport.height:k[L])-a.height,y*=f?1:-1),l!==o&&(l!==i&&l!==n||h!==c)||(T=s,b-=(g&&k===O&&O.visualViewport?O.visualViewport.width:k[S])-a.width,b*=f?1:-1)}var D,$=Object.assign({position:u},p&&Z),I=!0===m?function(t,e){var 
i=t.x,n=t.y,s=e.devicePixelRatio||1;return{x:P(i*s)/s||0,y:P(n*s)/s||0}}({x:b,y},x(r)):{x:b,y};return b=I.x,y=I.y,f?Object.assign({},$,((D={})[C]=A?"0":"",D[T]=E?"0":"",D.transform=(O.devicePixelRatio||1)<=1?"translate("+b+"px, "+y+"px)":"translate3d("+b+"px, "+y+"px, 0)",D)):Object.assign({},$,((e={})[C]=A?y+"px":"",e[T]=E?b+"px":"",e.transform="",e))}const et={name:"computeStyles",enabled:!0,phase:"beforeWrite",fn:function(t){var e=t.state,i=t.options,n=i.gpuAcceleration,s=void 0===n||n,o=i.adaptive,r=void 0===o||o,a=i.roundOffsets,l=void 0===a||a,c={placement:$(e.placement),variation:J(e.placement),popper:e.elements.popper,popperRect:e.rects.popper,gpuAcceleration:s,isFixed:"fixed"===e.options.strategy};null!=e.modifiersData.popperOffsets&&(e.styles.popper=Object.assign({},e.styles.popper,tt(Object.assign({},c,{offsets:e.modifiersData.popperOffsets,position:e.options.strategy,adaptive:r,roundOffsets:l})))),null!=e.modifiersData.arrow&&(e.styles.arrow=Object.assign({},e.styles.arrow,tt(Object.assign({},c,{offsets:e.modifiersData.arrow,position:"absolute",adaptive:!1,roundOffsets:l})))),e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-placement":e.placement})},data:{}};var it={passive:!0};const nt={name:"eventListeners",enabled:!0,phase:"write",fn:function(){},effect:function(t){var e=t.state,i=t.instance,n=t.options,s=n.scroll,o=void 0===s||s,r=n.resize,a=void 0===r||r,l=x(e.elements.popper),c=[].concat(e.scrollParents.reference,e.scrollParents.popper);return o&&c.forEach((function(t){t.addEventListener("scroll",i.update,it)})),a&&l.addEventListener("resize",i.update,it),function(){o&&c.forEach((function(t){t.removeEventListener("scroll",i.update,it)})),a&&l.removeEventListener("resize",i.update,it)}},data:{}};var st={left:"right",right:"left",bottom:"top",top:"bottom"};function ot(t){return t.replace(/left|right|bottom|top/g,(function(t){return st[t]}))}var rt={start:"end",end:"start"};function at(t){return t.replace(/start|end/g,(function(t){return rt[t]}))}function lt(t){var e=x(t);return{scrollLeft:e.pageXOffset,scrollTop:e.pageYOffset}}function ct(t){return F(R(t)).left+lt(t).scrollLeft}function ht(t){var e=W(t),i=e.overflow,n=e.overflowX,s=e.overflowY;return/auto|scroll|overlay|hidden/.test(i+s+n)}function dt(t){return["html","body","#document"].indexOf(O(t))>=0?t.ownerDocument.body:L(t)&&ht(t)?t:dt(q(t))}function ut(t,e){var i;void 0===e&&(e=[]);var n=dt(t),s=n===(null==(i=t.ownerDocument)?void 0:i.body),o=x(n),r=s?[o].concat(o.visualViewport||[],ht(n)?n:[]):n,a=e.concat(r);return s?a:a.concat(ut(q(r)))}function ft(t){return Object.assign({},t,{left:t.x,top:t.y,right:t.x+t.width,bottom:t.y+t.height})}function pt(t,e,i){return e===d?ft(function(t,e){var i=x(t),n=R(t),s=i.visualViewport,o=n.clientWidth,r=n.clientHeight,a=0,l=0;if(s){o=s.width,r=s.height;var c=j();(c||!c&&"fixed"===e)&&(a=s.offsetLeft,l=s.offsetTop)}return{width:o,height:r,x:a+ct(t),y:l}}(t,i)):k(e)?function(t,e){var i=F(t,!1,"fixed"===e);return i.top=i.top+t.clientTop,i.left=i.left+t.clientLeft,i.bottom=i.top+t.clientHeight,i.right=i.left+t.clientWidth,i.width=t.clientWidth,i.height=t.clientHeight,i.x=i.left,i.y=i.top,i}(e,i):ft(function(t){var e,i=R(t),n=lt(t),s=null==(e=t.ownerDocument)?void 0:e.body,o=I(i.scrollWidth,i.clientWidth,s?s.scrollWidth:0,s?s.clientWidth:0),r=I(i.scrollHeight,i.clientHeight,s?s.scrollHeight:0,s?s.clientHeight:0),a=-n.scrollLeft+ct(t),l=-n.scrollTop;return"rtl"===W(s||i).direction&&(a+=I(i.clientWidth,s?s.clientWidth:0)-o),{width:o,height:r,x:a,y:l}}(R(t)))}function 
mt(t){var e,r=t.reference,a=t.element,h=t.placement,d=h?$(h):null,u=h?J(h):null,f=r.x+r.width/2-a.width/2,p=r.y+r.height/2-a.height/2;switch(d){case i:e={x:f,y:r.y-a.height};break;case n:e={x:f,y:r.y+r.height};break;case s:e={x:r.x+r.width,y:p};break;case o:e={x:r.x-a.width,y:p};break;default:e={x:r.x,y:r.y}}var m=d?K(d):null;if(null!=m){var g="y"===m?"height":"width";switch(u){case l:e[m]=e[m]-(r[g]/2-a[g]/2);break;case c:e[m]=e[m]+(r[g]/2-a[g]/2)}}return e}function gt(t,e){void 0===e&&(e={});var o=e,r=o.placement,l=void 0===r?t.placement:r,c=o.strategy,p=void 0===c?t.strategy:c,m=o.boundary,g=void 0===m?h:m,_=o.rootBoundary,b=void 0===_?d:_,v=o.elementContext,y=void 0===v?u:v,w=o.altBoundary,E=void 0!==w&&w,A=o.padding,T=void 0===A?0:A,C=X("number"!=typeof T?T:U(T,a)),x=y===u?f:u,S=t.rects.popper,D=t.elements[E?x:y],$=function(t,e,i,n){var s="clippingParents"===e?function(t){var e=ut(q(t)),i=["absolute","fixed"].indexOf(W(t).position)>=0&&L(t)?Y(t):t;return k(i)?e.filter((function(t){return k(t)&&B(t,i)&&"body"!==O(t)})):[]}(t):[].concat(e),o=[].concat(s,[i]),r=o[0],a=o.reduce((function(e,i){var s=pt(t,i,n);return e.top=I(s.top,e.top),e.right=N(s.right,e.right),e.bottom=N(s.bottom,e.bottom),e.left=I(s.left,e.left),e}),pt(t,r,n));return a.width=a.right-a.left,a.height=a.bottom-a.top,a.x=a.left,a.y=a.top,a}(k(D)?D:D.contextElement||R(t.elements.popper),g,b,p),P=F(t.elements.reference),M=mt({reference:P,element:S,strategy:"absolute",placement:l}),j=ft(Object.assign({},S,M)),H=y===u?j:P,z={top:$.top-H.top+C.top,bottom:H.bottom-$.bottom+C.bottom,left:$.left-H.left+C.left,right:H.right-$.right+C.right},V=t.modifiersData.offset;if(y===u&&V){var K=V[l];Object.keys(z).forEach((function(t){var e=[s,n].indexOf(t)>=0?1:-1,o=[i,n].indexOf(t)>=0?"y":"x";z[t]+=K[o]*e}))}return z}const _t={name:"flip",enabled:!0,phase:"main",fn:function(t){var e=t.state,c=t.options,h=t.name;if(!e.modifiersData[h]._skip){for(var d=c.mainAxis,u=void 0===d||d,f=c.altAxis,g=void 0===f||f,_=c.fallbackPlacements,b=c.padding,v=c.boundary,y=c.rootBoundary,w=c.altBoundary,E=c.flipVariations,A=void 0===E||E,T=c.allowedAutoPlacements,C=e.options.placement,O=$(C),x=_||(O!==C&&A?function(t){if($(t)===r)return[];var e=ot(t);return[at(t),e,at(e)]}(C):[ot(C)]),k=[C].concat(x).reduce((function(t,i){return t.concat($(i)===r?function(t,e){void 0===e&&(e={});var i=e,n=i.placement,s=i.boundary,o=i.rootBoundary,r=i.padding,l=i.flipVariations,c=i.allowedAutoPlacements,h=void 0===c?m:c,d=J(n),u=d?l?p:p.filter((function(t){return J(t)===d})):a,f=u.filter((function(t){return h.indexOf(t)>=0}));0===f.length&&(f=u);var g=f.reduce((function(e,i){return e[i]=gt(t,{placement:i,boundary:s,rootBoundary:o,padding:r})[$(i)],e}),{});return Object.keys(g).sort((function(t,e){return g[t]-g[e]}))}(e,{placement:i,boundary:v,rootBoundary:y,padding:b,flipVariations:A,allowedAutoPlacements:T}):i)}),[]),L=e.rects.reference,S=e.rects.popper,D=new Map,I=!0,N=k[0],P=0;P<k.length;P++){var M=k[P],j=$(M),F=J(M)===l,H=[i,n].indexOf(j)>=0,B=H?"width":"height",W=gt(e,{placement:M,boundary:v,rootBoundary:y,altBoundary:w,padding:b}),z=H?F?s:o:F?n:i;L[B]>S[B]&&(z=ot(z));var R=ot(z),q=[];if(u&&q.push(W[j]<=0),g&&q.push(W[z]<=0,W[R]<=0),q.every((function(t){return t}))){N=M,I=!1;break}D.set(M,q)}if(I)for(var V=function(t){var e=k.find((function(e){var i=D.get(e);if(i)return i.slice(0,t).every((function(t){return t}))}));if(e)return N=e,"break"},Y=A?3:1;Y>0&&"break"!==V(Y);Y--);e.placement!==N&&(e.modifiersData[h]._skip=!0,e.placement=N,e.reset=!0)}},requiresIfExists:["offset"],data:{_skip:!1}};function bt(t,e,i){return void
0===i&&(i={x:0,y:0}),{top:t.top-e.height-i.y,right:t.right-e.width+i.x,bottom:t.bottom-e.height+i.y,left:t.left-e.width-i.x}}function vt(t){return[i,s,n,o].some((function(e){return t[e]>=0}))}const yt={name:"hide",enabled:!0,phase:"main",requiresIfExists:["preventOverflow"],fn:function(t){var e=t.state,i=t.name,n=e.rects.reference,s=e.rects.popper,o=e.modifiersData.preventOverflow,r=gt(e,{elementContext:"reference"}),a=gt(e,{altBoundary:!0}),l=bt(r,n),c=bt(a,s,o),h=vt(l),d=vt(c);e.modifiersData[i]={referenceClippingOffsets:l,popperEscapeOffsets:c,isReferenceHidden:h,hasPopperEscaped:d},e.attributes.popper=Object.assign({},e.attributes.popper,{"data-popper-reference-hidden":h,"data-popper-escaped":d})}},wt={name:"offset",enabled:!0,phase:"main",requires:["popperOffsets"],fn:function(t){var e=t.state,n=t.options,r=t.name,a=n.offset,l=void 0===a?[0,0]:a,c=m.reduce((function(t,n){return t[n]=function(t,e,n){var r=$(t),a=[o,i].indexOf(r)>=0?-1:1,l="function"==typeof n?n(Object.assign({},e,{placement:t})):n,c=l[0],h=l[1];return c=c||0,h=(h||0)*a,[o,s].indexOf(r)>=0?{x:h,y:c}:{x:c,y:h}}(n,e.rects,l),t}),{}),h=c[e.placement],d=h.x,u=h.y;null!=e.modifiersData.popperOffsets&&(e.modifiersData.popperOffsets.x+=d,e.modifiersData.popperOffsets.y+=u),e.modifiersData[r]=c}},Et={name:"popperOffsets",enabled:!0,phase:"read",fn:function(t){var e=t.state,i=t.name;e.modifiersData[i]=mt({reference:e.rects.reference,element:e.rects.popper,strategy:"absolute",placement:e.placement})},data:{}},At={name:"preventOverflow",enabled:!0,phase:"main",fn:function(t){var e=t.state,r=t.options,a=t.name,c=r.mainAxis,h=void 0===c||c,d=r.altAxis,u=void 0!==d&&d,f=r.boundary,p=r.rootBoundary,m=r.altBoundary,g=r.padding,_=r.tether,b=void 0===_||_,v=r.tetherOffset,y=void 0===v?0:v,w=gt(e,{boundary:f,rootBoundary:p,padding:g,altBoundary:m}),E=$(e.placement),A=J(e.placement),T=!A,C=K(E),O="x"===C?"y":"x",x=e.modifiersData.popperOffsets,k=e.rects.reference,L=e.rects.popper,S="function"==typeof y?y(Object.assign({},e.rects,{placement:e.placement})):y,D="number"==typeof S?{mainAxis:S,altAxis:S}:Object.assign({mainAxis:0,altAxis:0},S),P=e.modifiersData.offset?e.modifiersData.offset[e.placement]:null,M={x:0,y:0};if(x){if(h){var j,F="y"===C?i:o,B="y"===C?n:s,W="y"===C?"height":"width",z=x[C],R=z+w[F],q=z-w[B],V=b?-L[W]/2:0,X=A===l?k[W]:L[W],U=A===l?-L[W]:-k[W],G=e.elements.arrow,Z=b&&G?H(G):{width:0,height:0},tt=e.modifiersData["arrow#persistent"]?e.modifiersData["arrow#persistent"].padding:{top:0,right:0,bottom:0,left:0},et=tt[F],it=tt[B],nt=Q(0,k[W],Z[W]),st=T?k[W]/2-V-nt-et-D.mainAxis:X-nt-et-D.mainAxis,ot=T?-k[W]/2+V+nt+it+D.mainAxis:U+nt+it+D.mainAxis,rt=e.elements.arrow&&Y(e.elements.arrow),at=rt?"y"===C?rt.clientTop||0:rt.clientLeft||0:0,lt=null!=(j=null==P?void 0:P[C])?j:0,ct=z+ot-lt,ht=Q(b?N(R,z+st-lt-at):R,z,b?I(q,ct):q);x[C]=ht,M[C]=ht-z}if(u){var dt,ut="x"===C?i:o,ft="x"===C?n:s,pt=x[O],mt="y"===O?"height":"width",_t=pt+w[ut],bt=pt-w[ft],vt=-1!==[i,o].indexOf(E),yt=null!=(dt=null==P?void 0:P[O])?dt:0,wt=vt?_t:pt-k[mt]-L[mt]-yt+D.altAxis,Et=vt?pt+k[mt]+L[mt]-yt-D.altAxis:bt,At=b&&vt?function(t,e,i){var n=Q(t,e,i);return n>i?i:n}(wt,pt,Et):Q(b?wt:_t,pt,b?Et:bt);x[O]=At,M[O]=At-pt}e.modifiersData[a]=M}},requiresIfExists:["offset"]};function Tt(t,e,i){void 0===i&&(i=!1);var n,s,o=L(e),r=L(e)&&function(t){var e=t.getBoundingClientRect(),i=P(e.width)/t.offsetWidth||1,n=P(e.height)/t.offsetHeight||1;return 
1!==i||1!==n}(e),a=R(e),l=F(t,r,i),c={scrollLeft:0,scrollTop:0},h={x:0,y:0};return(o||!o&&!i)&&(("body"!==O(e)||ht(a))&&(c=(n=e)!==x(n)&&L(n)?{scrollLeft:(s=n).scrollLeft,scrollTop:s.scrollTop}:lt(n)),L(e)?((h=F(e,!0)).x+=e.clientLeft,h.y+=e.clientTop):a&&(h.x=ct(a))),{x:l.left+c.scrollLeft-h.x,y:l.top+c.scrollTop-h.y,width:l.width,height:l.height}}function Ct(t){var e=new Map,i=new Set,n=[];function s(t){i.add(t.name),[].concat(t.requires||[],t.requiresIfExists||[]).forEach((function(t){if(!i.has(t)){var n=e.get(t);n&&s(n)}})),n.push(t)}return t.forEach((function(t){e.set(t.name,t)})),t.forEach((function(t){i.has(t.name)||s(t)})),n}var Ot={placement:"bottom",modifiers:[],strategy:"absolute"};function xt(){for(var t=arguments.length,e=new Array(t),i=0;i$t.has(t)&&$t.get(t).get(e)||null,remove(t,e){if(!$t.has(t))return;const i=$t.get(t);i.delete(e),0===i.size&&$t.delete(t)}},Nt="transitionend",Pt=t=>(t&&window.CSS&&window.CSS.escape&&(t=t.replace(/#([^\s"#']+)/g,((t,e)=>`#${CSS.escape(e)}`))),t),Mt=t=>{t.dispatchEvent(new Event(Nt))},jt=t=>!(!t||"object"!=typeof t)&&(void 0!==t.jquery&&(t=t[0]),void 0!==t.nodeType),Ft=t=>jt(t)?t.jquery?t[0]:t:"string"==typeof t&&t.length>0?document.querySelector(Pt(t)):null,Ht=t=>{if(!jt(t)||0===t.getClientRects().length)return!1;const e="visible"===getComputedStyle(t).getPropertyValue("visibility"),i=t.closest("details:not([open])");if(!i)return e;if(i!==t){const e=t.closest("summary");if(e&&e.parentNode!==i)return!1;if(null===e)return!1}return e},Bt=t=>!t||t.nodeType!==Node.ELEMENT_NODE||!!t.classList.contains("disabled")||(void 0!==t.disabled?t.disabled:t.hasAttribute("disabled")&&"false"!==t.getAttribute("disabled")),Wt=t=>{if(!document.documentElement.attachShadow)return null;if("function"==typeof t.getRootNode){const e=t.getRootNode();return e instanceof ShadowRoot?e:null}return t instanceof ShadowRoot?t:t.parentNode?Wt(t.parentNode):null},zt=()=>{},Rt=t=>{t.offsetHeight},qt=()=>window.jQuery&&!document.body.hasAttribute("data-bs-no-jquery")?window.jQuery:null,Vt=[],Yt=()=>"rtl"===document.documentElement.dir,Kt=t=>{var e;e=()=>{const e=qt();if(e){const i=t.NAME,n=e.fn[i];e.fn[i]=t.jQueryInterface,e.fn[i].Constructor=t,e.fn[i].noConflict=()=>(e.fn[i]=n,t.jQueryInterface)}},"loading"===document.readyState?(Vt.length||document.addEventListener("DOMContentLoaded",(()=>{for(const t of Vt)t()})),Vt.push(e)):e()},Qt=(t,e=[],i=t)=>"function"==typeof t?t(...e):i,Xt=(t,e,i=!0)=>{if(!i)return void Qt(t);const n=(t=>{if(!t)return 0;let{transitionDuration:e,transitionDelay:i}=window.getComputedStyle(t);const n=Number.parseFloat(e),s=Number.parseFloat(i);return n||s?(e=e.split(",")[0],i=i.split(",")[0],1e3*(Number.parseFloat(e)+Number.parseFloat(i))):0})(e)+5;let s=!1;const o=({target:i})=>{i===e&&(s=!0,e.removeEventListener(Nt,o),Qt(t))};e.addEventListener(Nt,o),setTimeout((()=>{s||Mt(e)}),n)},Ut=(t,e,i,n)=>{const s=t.length;let o=t.indexOf(e);return-1===o?!i&&n?t[s-1]:t[0]:(o+=i?1:-1,n&&(o=(o+s)%s),t[Math.max(0,Math.min(o,s-1))])},Gt=/[^.]*(?=\..*)\.|.*/,Jt=/\..*/,Zt=/::\d+$/,te={};let ee=1;const ie={mouseenter:"mouseover",mouseleave:"mouseout"},ne=new 
Set(["click","dblclick","mouseup","mousedown","contextmenu","mousewheel","DOMMouseScroll","mouseover","mouseout","mousemove","selectstart","selectend","keydown","keypress","keyup","orientationchange","touchstart","touchmove","touchend","touchcancel","pointerdown","pointermove","pointerup","pointerleave","pointercancel","gesturestart","gesturechange","gestureend","focus","blur","change","reset","select","submit","focusin","focusout","load","unload","beforeunload","resize","move","DOMContentLoaded","readystatechange","error","abort","scroll"]);function se(t,e){return e&&`${e}::${ee++}`||t.uidEvent||ee++}function oe(t){const e=se(t);return t.uidEvent=e,te[e]=te[e]||{},te[e]}function re(t,e,i=null){return Object.values(t).find((t=>t.callable===e&&t.delegationSelector===i))}function ae(t,e,i){const n="string"==typeof e,s=n?i:e||i;let o=de(t);return ne.has(o)||(o=t),[n,s,o]}function le(t,e,i,n,s){if("string"!=typeof e||!t)return;let[o,r,a]=ae(e,i,n);if(e in ie){const t=t=>function(e){if(!e.relatedTarget||e.relatedTarget!==e.delegateTarget&&!e.delegateTarget.contains(e.relatedTarget))return t.call(this,e)};r=t(r)}const l=oe(t),c=l[a]||(l[a]={}),h=re(c,r,o?i:null);if(h)return void(h.oneOff=h.oneOff&&s);const d=se(r,e.replace(Gt,"")),u=o?function(t,e,i){return function n(s){const o=t.querySelectorAll(e);for(let{target:r}=s;r&&r!==this;r=r.parentNode)for(const a of o)if(a===r)return fe(s,{delegateTarget:r}),n.oneOff&&ue.off(t,s.type,e,i),i.apply(r,[s])}}(t,i,r):function(t,e){return function i(n){return fe(n,{delegateTarget:t}),i.oneOff&&ue.off(t,n.type,e),e.apply(t,[n])}}(t,r);u.delegationSelector=o?i:null,u.callable=r,u.oneOff=s,u.uidEvent=d,c[d]=u,t.addEventListener(a,u,o)}function ce(t,e,i,n,s){const o=re(e[i],n,s);o&&(t.removeEventListener(i,o,Boolean(s)),delete e[i][o.uidEvent])}function he(t,e,i,n){const s=e[i]||{};for(const[o,r]of Object.entries(s))o.includes(n)&&ce(t,e,i,r.callable,r.delegationSelector)}function de(t){return t=t.replace(Jt,""),ie[t]||t}const ue={on(t,e,i,n){le(t,e,i,n,!1)},one(t,e,i,n){le(t,e,i,n,!0)},off(t,e,i,n){if("string"!=typeof e||!t)return;const[s,o,r]=ae(e,i,n),a=r!==e,l=oe(t),c=l[r]||{},h=e.startsWith(".");if(void 0===o){if(h)for(const i of Object.keys(l))he(t,l,i,e.slice(1));for(const[i,n]of Object.entries(c)){const s=i.replace(Zt,"");a&&!e.includes(s)||ce(t,l,r,n.callable,n.delegationSelector)}}else{if(!Object.keys(c).length)return;ce(t,l,r,o,s?i:null)}},trigger(t,e,i){if("string"!=typeof e||!t)return null;const n=qt();let s=null,o=!0,r=!0,a=!1;e!==de(e)&&n&&(s=n.Event(e,i),n(t).trigger(s),o=!s.isPropagationStopped(),r=!s.isImmediatePropagationStopped(),a=s.isDefaultPrevented());const l=fe(new Event(e,{bubbles:o,cancelable:!0}),i);return a&&l.preventDefault(),r&&t.dispatchEvent(l),l.defaultPrevented&&s&&s.preventDefault(),l}};function fe(t,e={}){for(const[i,n]of Object.entries(e))try{t[i]=n}catch(e){Object.defineProperty(t,i,{configurable:!0,get:()=>n})}return t}function pe(t){if("true"===t)return!0;if("false"===t)return!1;if(t===Number(t).toString())return Number(t);if(""===t||"null"===t)return null;if("string"!=typeof t)return t;try{return JSON.parse(decodeURIComponent(t))}catch(e){return t}}function me(t){return t.replace(/[A-Z]/g,(t=>`-${t.toLowerCase()}`))}const ge={setDataAttribute(t,e,i){t.setAttribute(`data-bs-${me(e)}`,i)},removeDataAttribute(t,e){t.removeAttribute(`data-bs-${me(e)}`)},getDataAttributes(t){if(!t)return{};const e={},i=Object.keys(t.dataset).filter((t=>t.startsWith("bs")&&!t.startsWith("bsConfig")));for(const n of i){let 
i=n.replace(/^bs/,"");i=i.charAt(0).toLowerCase()+i.slice(1,i.length),e[i]=pe(t.dataset[n])}return e},getDataAttribute:(t,e)=>pe(t.getAttribute(`data-bs-${me(e)}`))};class _e{static get Default(){return{}}static get DefaultType(){return{}}static get NAME(){throw new Error('You have to implement the static method "NAME", for each component!')}_getConfig(t){return t=this._mergeConfigObj(t),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}_configAfterMerge(t){return t}_mergeConfigObj(t,e){const i=jt(e)?ge.getDataAttribute(e,"config"):{};return{...this.constructor.Default,..."object"==typeof i?i:{},...jt(e)?ge.getDataAttributes(e):{},..."object"==typeof t?t:{}}}_typeCheckConfig(t,e=this.constructor.DefaultType){for(const[n,s]of Object.entries(e)){const e=t[n],o=jt(e)?"element":null==(i=e)?`${i}`:Object.prototype.toString.call(i).match(/\s([a-z]+)/i)[1].toLowerCase();if(!new RegExp(s).test(o))throw new TypeError(`${this.constructor.NAME.toUpperCase()}: Option "${n}" provided type "${o}" but expected type "${s}".`)}var i}}class be extends _e{constructor(t,e){super(),(t=Ft(t))&&(this._element=t,this._config=this._getConfig(e),It.set(this._element,this.constructor.DATA_KEY,this))}dispose(){It.remove(this._element,this.constructor.DATA_KEY),ue.off(this._element,this.constructor.EVENT_KEY);for(const t of Object.getOwnPropertyNames(this))this[t]=null}_queueCallback(t,e,i=!0){Xt(t,e,i)}_getConfig(t){return t=this._mergeConfigObj(t,this._element),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}static getInstance(t){return It.get(Ft(t),this.DATA_KEY)}static getOrCreateInstance(t,e={}){return this.getInstance(t)||new this(t,"object"==typeof e?e:null)}static get VERSION(){return"5.3.2"}static get DATA_KEY(){return`bs.${this.NAME}`}static get EVENT_KEY(){return`.${this.DATA_KEY}`}static eventName(t){return`${t}${this.EVENT_KEY}`}}const ve=t=>{let e=t.getAttribute("data-bs-target");if(!e||"#"===e){let i=t.getAttribute("href");if(!i||!i.includes("#")&&!i.startsWith("."))return null;i.includes("#")&&!i.startsWith("#")&&(i=`#${i.split("#")[1]}`),e=i&&"#"!==i?Pt(i.trim()):null}return e},ye={find:(t,e=document.documentElement)=>[].concat(...Element.prototype.querySelectorAll.call(e,t)),findOne:(t,e=document.documentElement)=>Element.prototype.querySelector.call(e,t),children:(t,e)=>[].concat(...t.children).filter((t=>t.matches(e))),parents(t,e){const i=[];let n=t.parentNode.closest(e);for(;n;)i.push(n),n=n.parentNode.closest(e);return i},prev(t,e){let i=t.previousElementSibling;for(;i;){if(i.matches(e))return[i];i=i.previousElementSibling}return[]},next(t,e){let i=t.nextElementSibling;for(;i;){if(i.matches(e))return[i];i=i.nextElementSibling}return[]},focusableChildren(t){const e=["a","button","input","textarea","select","details","[tabindex]",'[contenteditable="true"]'].map((t=>`${t}:not([tabindex^="-"])`)).join(",");return this.find(e,t).filter((t=>!Bt(t)&&Ht(t)))},getSelectorFromElement(t){const e=ve(t);return e&&ye.findOne(e)?e:null},getElementFromSelector(t){const e=ve(t);return e?ye.findOne(e):null},getMultipleElementsFromSelector(t){const e=ve(t);return e?ye.find(e):[]}},we=(t,e="hide")=>{const i=`click.dismiss${t.EVENT_KEY}`,n=t.NAME;ue.on(document,i,`[data-bs-dismiss="${n}"]`,(function(i){if(["A","AREA"].includes(this.tagName)&&i.preventDefault(),Bt(this))return;const s=ye.getElementFromSelector(this)||this.closest(`.${n}`);t.getOrCreateInstance(s)[e]()}))},Ee=".bs.alert",Ae=`close${Ee}`,Te=`closed${Ee}`;class Ce extends be{static get 
NAME(){return"alert"}close(){if(ue.trigger(this._element,Ae).defaultPrevented)return;this._element.classList.remove("show");const t=this._element.classList.contains("fade");this._queueCallback((()=>this._destroyElement()),this._element,t)}_destroyElement(){this._element.remove(),ue.trigger(this._element,Te),this.dispose()}static jQueryInterface(t){return this.each((function(){const e=Ce.getOrCreateInstance(this);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}we(Ce,"close"),Kt(Ce);const Oe='[data-bs-toggle="button"]';class xe extends be{static get NAME(){return"button"}toggle(){this._element.setAttribute("aria-pressed",this._element.classList.toggle("active"))}static jQueryInterface(t){return this.each((function(){const e=xe.getOrCreateInstance(this);"toggle"===t&&e[t]()}))}}ue.on(document,"click.bs.button.data-api",Oe,(t=>{t.preventDefault();const e=t.target.closest(Oe);xe.getOrCreateInstance(e).toggle()})),Kt(xe);const ke=".bs.swipe",Le=`touchstart${ke}`,Se=`touchmove${ke}`,De=`touchend${ke}`,$e=`pointerdown${ke}`,Ie=`pointerup${ke}`,Ne={endCallback:null,leftCallback:null,rightCallback:null},Pe={endCallback:"(function|null)",leftCallback:"(function|null)",rightCallback:"(function|null)"};class Me extends _e{constructor(t,e){super(),this._element=t,t&&Me.isSupported()&&(this._config=this._getConfig(e),this._deltaX=0,this._supportPointerEvents=Boolean(window.PointerEvent),this._initEvents())}static get Default(){return Ne}static get DefaultType(){return Pe}static get NAME(){return"swipe"}dispose(){ue.off(this._element,ke)}_start(t){this._supportPointerEvents?this._eventIsPointerPenTouch(t)&&(this._deltaX=t.clientX):this._deltaX=t.touches[0].clientX}_end(t){this._eventIsPointerPenTouch(t)&&(this._deltaX=t.clientX-this._deltaX),this._handleSwipe(),Qt(this._config.endCallback)}_move(t){this._deltaX=t.touches&&t.touches.length>1?0:t.touches[0].clientX-this._deltaX}_handleSwipe(){const t=Math.abs(this._deltaX);if(t<=40)return;const e=t/this._deltaX;this._deltaX=0,e&&Qt(e>0?this._config.rightCallback:this._config.leftCallback)}_initEvents(){this._supportPointerEvents?(ue.on(this._element,$e,(t=>this._start(t))),ue.on(this._element,Ie,(t=>this._end(t))),this._element.classList.add("pointer-event")):(ue.on(this._element,Le,(t=>this._start(t))),ue.on(this._element,Se,(t=>this._move(t))),ue.on(this._element,De,(t=>this._end(t))))}_eventIsPointerPenTouch(t){return this._supportPointerEvents&&("pen"===t.pointerType||"touch"===t.pointerType)}static isSupported(){return"ontouchstart"in document.documentElement||navigator.maxTouchPoints>0}}const je=".bs.carousel",Fe=".data-api",He="next",Be="prev",We="left",ze="right",Re=`slide${je}`,qe=`slid${je}`,Ve=`keydown${je}`,Ye=`mouseenter${je}`,Ke=`mouseleave${je}`,Qe=`dragstart${je}`,Xe=`load${je}${Fe}`,Ue=`click${je}${Fe}`,Ge="carousel",Je="active",Ze=".active",ti=".carousel-item",ei=Ze+ti,ii={ArrowLeft:ze,ArrowRight:We},ni={interval:5e3,keyboard:!0,pause:"hover",ride:!1,touch:!0,wrap:!0},si={interval:"(number|boolean)",keyboard:"boolean",pause:"(string|boolean)",ride:"(boolean|string)",touch:"boolean",wrap:"boolean"};class oi extends be{constructor(t,e){super(t,e),this._interval=null,this._activeElement=null,this._isSliding=!1,this.touchTimeout=null,this._swipeHelper=null,this._indicatorsElement=ye.findOne(".carousel-indicators",this._element),this._addEventListeners(),this._config.ride===Ge&&this.cycle()}static get Default(){return ni}static get DefaultType(){return 
si}static get NAME(){return"carousel"}next(){this._slide(He)}nextWhenVisible(){!document.hidden&&Ht(this._element)&&this.next()}prev(){this._slide(Be)}pause(){this._isSliding&&Mt(this._element),this._clearInterval()}cycle(){this._clearInterval(),this._updateInterval(),this._interval=setInterval((()=>this.nextWhenVisible()),this._config.interval)}_maybeEnableCycle(){this._config.ride&&(this._isSliding?ue.one(this._element,qe,(()=>this.cycle())):this.cycle())}to(t){const e=this._getItems();if(t>e.length-1||t<0)return;if(this._isSliding)return void ue.one(this._element,qe,(()=>this.to(t)));const i=this._getItemIndex(this._getActive());if(i===t)return;const n=t>i?He:Be;this._slide(n,e[t])}dispose(){this._swipeHelper&&this._swipeHelper.dispose(),super.dispose()}_configAfterMerge(t){return t.defaultInterval=t.interval,t}_addEventListeners(){this._config.keyboard&&ue.on(this._element,Ve,(t=>this._keydown(t))),"hover"===this._config.pause&&(ue.on(this._element,Ye,(()=>this.pause())),ue.on(this._element,Ke,(()=>this._maybeEnableCycle()))),this._config.touch&&Me.isSupported()&&this._addTouchEventListeners()}_addTouchEventListeners(){for(const t of ye.find(".carousel-item img",this._element))ue.on(t,Qe,(t=>t.preventDefault()));const t={leftCallback:()=>this._slide(this._directionToOrder(We)),rightCallback:()=>this._slide(this._directionToOrder(ze)),endCallback:()=>{"hover"===this._config.pause&&(this.pause(),this.touchTimeout&&clearTimeout(this.touchTimeout),this.touchTimeout=setTimeout((()=>this._maybeEnableCycle()),500+this._config.interval))}};this._swipeHelper=new Me(this._element,t)}_keydown(t){if(/input|textarea/i.test(t.target.tagName))return;const e=ii[t.key];e&&(t.preventDefault(),this._slide(this._directionToOrder(e)))}_getItemIndex(t){return this._getItems().indexOf(t)}_setActiveIndicatorElement(t){if(!this._indicatorsElement)return;const e=ye.findOne(Ze,this._indicatorsElement);e.classList.remove(Je),e.removeAttribute("aria-current");const i=ye.findOne(`[data-bs-slide-to="${t}"]`,this._indicatorsElement);i&&(i.classList.add(Je),i.setAttribute("aria-current","true"))}_updateInterval(){const t=this._activeElement||this._getActive();if(!t)return;const e=Number.parseInt(t.getAttribute("data-bs-interval"),10);this._config.interval=e||this._config.defaultInterval}_slide(t,e=null){if(this._isSliding)return;const i=this._getActive(),n=t===He,s=e||Ut(this._getItems(),i,n,this._config.wrap);if(s===i)return;const o=this._getItemIndex(s),r=e=>ue.trigger(this._element,e,{relatedTarget:s,direction:this._orderToDirection(t),from:this._getItemIndex(i),to:o});if(r(Re).defaultPrevented)return;if(!i||!s)return;const a=Boolean(this._interval);this.pause(),this._isSliding=!0,this._setActiveIndicatorElement(o),this._activeElement=s;const l=n?"carousel-item-start":"carousel-item-end",c=n?"carousel-item-next":"carousel-item-prev";s.classList.add(c),Rt(s),i.classList.add(l),s.classList.add(l),this._queueCallback((()=>{s.classList.remove(l,c),s.classList.add(Je),i.classList.remove(Je,c,l),this._isSliding=!1,r(qe)}),i,this._isAnimated()),a&&this.cycle()}_isAnimated(){return this._element.classList.contains("slide")}_getActive(){return ye.findOne(ei,this._element)}_getItems(){return ye.find(ti,this._element)}_clearInterval(){this._interval&&(clearInterval(this._interval),this._interval=null)}_directionToOrder(t){return Yt()?t===We?Be:He:t===We?He:Be}_orderToDirection(t){return Yt()?t===Be?We:ze:t===Be?ze:We}static jQueryInterface(t){return this.each((function(){const 
e=oi.getOrCreateInstance(this,t);if("number"!=typeof t){if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}else e.to(t)}))}}ue.on(document,Ue,"[data-bs-slide], [data-bs-slide-to]",(function(t){const e=ye.getElementFromSelector(this);if(!e||!e.classList.contains(Ge))return;t.preventDefault();const i=oi.getOrCreateInstance(e),n=this.getAttribute("data-bs-slide-to");return n?(i.to(n),void i._maybeEnableCycle()):"next"===ge.getDataAttribute(this,"slide")?(i.next(),void i._maybeEnableCycle()):(i.prev(),void i._maybeEnableCycle())})),ue.on(window,Xe,(()=>{const t=ye.find('[data-bs-ride="carousel"]');for(const e of t)oi.getOrCreateInstance(e)})),Kt(oi);const ri=".bs.collapse",ai=`show${ri}`,li=`shown${ri}`,ci=`hide${ri}`,hi=`hidden${ri}`,di=`click${ri}.data-api`,ui="show",fi="collapse",pi="collapsing",mi=`:scope .${fi} .${fi}`,gi='[data-bs-toggle="collapse"]',_i={parent:null,toggle:!0},bi={parent:"(null|element)",toggle:"boolean"};class vi extends be{constructor(t,e){super(t,e),this._isTransitioning=!1,this._triggerArray=[];const i=ye.find(gi);for(const t of i){const e=ye.getSelectorFromElement(t),i=ye.find(e).filter((t=>t===this._element));null!==e&&i.length&&this._triggerArray.push(t)}this._initializeChildren(),this._config.parent||this._addAriaAndCollapsedClass(this._triggerArray,this._isShown()),this._config.toggle&&this.toggle()}static get Default(){return _i}static get DefaultType(){return bi}static get NAME(){return"collapse"}toggle(){this._isShown()?this.hide():this.show()}show(){if(this._isTransitioning||this._isShown())return;let t=[];if(this._config.parent&&(t=this._getFirstLevelChildren(".collapse.show, .collapse.collapsing").filter((t=>t!==this._element)).map((t=>vi.getOrCreateInstance(t,{toggle:!1})))),t.length&&t[0]._isTransitioning)return;if(ue.trigger(this._element,ai).defaultPrevented)return;for(const e of t)e.hide();const e=this._getDimension();this._element.classList.remove(fi),this._element.classList.add(pi),this._element.style[e]=0,this._addAriaAndCollapsedClass(this._triggerArray,!0),this._isTransitioning=!0;const i=`scroll${e[0].toUpperCase()+e.slice(1)}`;this._queueCallback((()=>{this._isTransitioning=!1,this._element.classList.remove(pi),this._element.classList.add(fi,ui),this._element.style[e]="",ue.trigger(this._element,li)}),this._element,!0),this._element.style[e]=`${this._element[i]}px`}hide(){if(this._isTransitioning||!this._isShown())return;if(ue.trigger(this._element,ci).defaultPrevented)return;const t=this._getDimension();this._element.style[t]=`${this._element.getBoundingClientRect()[t]}px`,Rt(this._element),this._element.classList.add(pi),this._element.classList.remove(fi,ui);for(const t of this._triggerArray){const e=ye.getElementFromSelector(t);e&&!this._isShown(e)&&this._addAriaAndCollapsedClass([t],!1)}this._isTransitioning=!0,this._element.style[t]="",this._queueCallback((()=>{this._isTransitioning=!1,this._element.classList.remove(pi),this._element.classList.add(fi),ue.trigger(this._element,hi)}),this._element,!0)}_isShown(t=this._element){return t.classList.contains(ui)}_configAfterMerge(t){return t.toggle=Boolean(t.toggle),t.parent=Ft(t.parent),t}_getDimension(){return this._element.classList.contains("collapse-horizontal")?"width":"height"}_initializeChildren(){if(!this._config.parent)return;const t=this._getFirstLevelChildren(gi);for(const e of t){const t=ye.getElementFromSelector(e);t&&this._addAriaAndCollapsedClass([e],this._isShown(t))}}_getFirstLevelChildren(t){const 
e=ye.find(mi,this._config.parent);return ye.find(t,this._config.parent).filter((t=>!e.includes(t)))}_addAriaAndCollapsedClass(t,e){if(t.length)for(const i of t)i.classList.toggle("collapsed",!e),i.setAttribute("aria-expanded",e)}static jQueryInterface(t){const e={};return"string"==typeof t&&/show|hide/.test(t)&&(e.toggle=!1),this.each((function(){const i=vi.getOrCreateInstance(this,e);if("string"==typeof t){if(void 0===i[t])throw new TypeError(`No method named "${t}"`);i[t]()}}))}}ue.on(document,di,gi,(function(t){("A"===t.target.tagName||t.delegateTarget&&"A"===t.delegateTarget.tagName)&&t.preventDefault();for(const t of ye.getMultipleElementsFromSelector(this))vi.getOrCreateInstance(t,{toggle:!1}).toggle()})),Kt(vi);const yi="dropdown",wi=".bs.dropdown",Ei=".data-api",Ai="ArrowUp",Ti="ArrowDown",Ci=`hide${wi}`,Oi=`hidden${wi}`,xi=`show${wi}`,ki=`shown${wi}`,Li=`click${wi}${Ei}`,Si=`keydown${wi}${Ei}`,Di=`keyup${wi}${Ei}`,$i="show",Ii='[data-bs-toggle="dropdown"]:not(.disabled):not(:disabled)',Ni=`${Ii}.${$i}`,Pi=".dropdown-menu",Mi=Yt()?"top-end":"top-start",ji=Yt()?"top-start":"top-end",Fi=Yt()?"bottom-end":"bottom-start",Hi=Yt()?"bottom-start":"bottom-end",Bi=Yt()?"left-start":"right-start",Wi=Yt()?"right-start":"left-start",zi={autoClose:!0,boundary:"clippingParents",display:"dynamic",offset:[0,2],popperConfig:null,reference:"toggle"},Ri={autoClose:"(boolean|string)",boundary:"(string|element)",display:"string",offset:"(array|string|function)",popperConfig:"(null|object|function)",reference:"(string|element|object)"};class qi extends be{constructor(t,e){super(t,e),this._popper=null,this._parent=this._element.parentNode,this._menu=ye.next(this._element,Pi)[0]||ye.prev(this._element,Pi)[0]||ye.findOne(Pi,this._parent),this._inNavbar=this._detectNavbar()}static get Default(){return zi}static get DefaultType(){return Ri}static get NAME(){return yi}toggle(){return this._isShown()?this.hide():this.show()}show(){if(Bt(this._element)||this._isShown())return;const t={relatedTarget:this._element};if(!ue.trigger(this._element,xi,t).defaultPrevented){if(this._createPopper(),"ontouchstart"in document.documentElement&&!this._parent.closest(".navbar-nav"))for(const t of[].concat(...document.body.children))ue.on(t,"mouseover",zt);this._element.focus(),this._element.setAttribute("aria-expanded",!0),this._menu.classList.add($i),this._element.classList.add($i),ue.trigger(this._element,ki,t)}}hide(){if(Bt(this._element)||!this._isShown())return;const t={relatedTarget:this._element};this._completeHide(t)}dispose(){this._popper&&this._popper.destroy(),super.dispose()}update(){this._inNavbar=this._detectNavbar(),this._popper&&this._popper.update()}_completeHide(t){if(!ue.trigger(this._element,Ci,t).defaultPrevented){if("ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))ue.off(t,"mouseover",zt);this._popper&&this._popper.destroy(),this._menu.classList.remove($i),this._element.classList.remove($i),this._element.setAttribute("aria-expanded","false"),ge.removeDataAttribute(this._menu,"popper"),ue.trigger(this._element,Oi,t)}}_getConfig(t){if("object"==typeof(t=super._getConfig(t)).reference&&!jt(t.reference)&&"function"!=typeof t.reference.getBoundingClientRect)throw new TypeError(`${yi.toUpperCase()}: Option "reference" provided type "object" without a required "getBoundingClientRect" method.`);return t}_createPopper(){if(void 0===e)throw new TypeError("Bootstrap's dropdowns require Popper (https://popper.js.org)");let 
t=this._element;"parent"===this._config.reference?t=this._parent:jt(this._config.reference)?t=Ft(this._config.reference):"object"==typeof this._config.reference&&(t=this._config.reference);const i=this._getPopperConfig();this._popper=St(t,this._menu,i)}_isShown(){return this._menu.classList.contains($i)}_getPlacement(){const t=this._parent;if(t.classList.contains("dropend"))return Bi;if(t.classList.contains("dropstart"))return Wi;if(t.classList.contains("dropup-center"))return"top";if(t.classList.contains("dropdown-center"))return"bottom";const e="end"===getComputedStyle(this._menu).getPropertyValue("--bs-position").trim();return t.classList.contains("dropup")?e?ji:Mi:e?Hi:Fi}_detectNavbar(){return null!==this._element.closest(".navbar")}_getOffset(){const{offset:t}=this._config;return"string"==typeof t?t.split(",").map((t=>Number.parseInt(t,10))):"function"==typeof t?e=>t(e,this._element):t}_getPopperConfig(){const t={placement:this._getPlacement(),modifiers:[{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"offset",options:{offset:this._getOffset()}}]};return(this._inNavbar||"static"===this._config.display)&&(ge.setDataAttribute(this._menu,"popper","static"),t.modifiers=[{name:"applyStyles",enabled:!1}]),{...t,...Qt(this._config.popperConfig,[t])}}_selectMenuItem({key:t,target:e}){const i=ye.find(".dropdown-menu .dropdown-item:not(.disabled):not(:disabled)",this._menu).filter((t=>Ht(t)));i.length&&Ut(i,e,t===Ti,!i.includes(e)).focus()}static jQueryInterface(t){return this.each((function(){const e=qi.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}static clearMenus(t){if(2===t.button||"keyup"===t.type&&"Tab"!==t.key)return;const e=ye.find(Ni);for(const i of e){const e=qi.getInstance(i);if(!e||!1===e._config.autoClose)continue;const n=t.composedPath(),s=n.includes(e._menu);if(n.includes(e._element)||"inside"===e._config.autoClose&&!s||"outside"===e._config.autoClose&&s)continue;if(e._menu.contains(t.target)&&("keyup"===t.type&&"Tab"===t.key||/input|select|option|textarea|form/i.test(t.target.tagName)))continue;const o={relatedTarget:e._element};"click"===t.type&&(o.clickEvent=t),e._completeHide(o)}}static dataApiKeydownHandler(t){const e=/input|textarea/i.test(t.target.tagName),i="Escape"===t.key,n=[Ai,Ti].includes(t.key);if(!n&&!i)return;if(e&&!i)return;t.preventDefault();const s=this.matches(Ii)?this:ye.prev(this,Ii)[0]||ye.next(this,Ii)[0]||ye.findOne(Ii,t.delegateTarget.parentNode),o=qi.getOrCreateInstance(s);if(n)return t.stopPropagation(),o.show(),void o._selectMenuItem(t);o._isShown()&&(t.stopPropagation(),o.hide(),s.focus())}}ue.on(document,Si,Ii,qi.dataApiKeydownHandler),ue.on(document,Si,Pi,qi.dataApiKeydownHandler),ue.on(document,Li,qi.clearMenus),ue.on(document,Di,qi.clearMenus),ue.on(document,Li,Ii,(function(t){t.preventDefault(),qi.getOrCreateInstance(this).toggle()})),Kt(qi);const Vi="backdrop",Yi="show",Ki=`mousedown.bs.${Vi}`,Qi={className:"modal-backdrop",clickCallback:null,isAnimated:!1,isVisible:!0,rootElement:"body"},Xi={className:"string",clickCallback:"(function|null)",isAnimated:"boolean",isVisible:"boolean",rootElement:"(element|string)"};class Ui extends _e{constructor(t){super(),this._config=this._getConfig(t),this._isAppended=!1,this._element=null}static get Default(){return Qi}static get DefaultType(){return Xi}static get NAME(){return Vi}show(t){if(!this._config.isVisible)return void Qt(t);this._append();const 
e=this._getElement();this._config.isAnimated&&Rt(e),e.classList.add(Yi),this._emulateAnimation((()=>{Qt(t)}))}hide(t){this._config.isVisible?(this._getElement().classList.remove(Yi),this._emulateAnimation((()=>{this.dispose(),Qt(t)}))):Qt(t)}dispose(){this._isAppended&&(ue.off(this._element,Ki),this._element.remove(),this._isAppended=!1)}_getElement(){if(!this._element){const t=document.createElement("div");t.className=this._config.className,this._config.isAnimated&&t.classList.add("fade"),this._element=t}return this._element}_configAfterMerge(t){return t.rootElement=Ft(t.rootElement),t}_append(){if(this._isAppended)return;const t=this._getElement();this._config.rootElement.append(t),ue.on(t,Ki,(()=>{Qt(this._config.clickCallback)})),this._isAppended=!0}_emulateAnimation(t){Xt(t,this._getElement(),this._config.isAnimated)}}const Gi=".bs.focustrap",Ji=`focusin${Gi}`,Zi=`keydown.tab${Gi}`,tn="backward",en={autofocus:!0,trapElement:null},nn={autofocus:"boolean",trapElement:"element"};class sn extends _e{constructor(t){super(),this._config=this._getConfig(t),this._isActive=!1,this._lastTabNavDirection=null}static get Default(){return en}static get DefaultType(){return nn}static get NAME(){return"focustrap"}activate(){this._isActive||(this._config.autofocus&&this._config.trapElement.focus(),ue.off(document,Gi),ue.on(document,Ji,(t=>this._handleFocusin(t))),ue.on(document,Zi,(t=>this._handleKeydown(t))),this._isActive=!0)}deactivate(){this._isActive&&(this._isActive=!1,ue.off(document,Gi))}_handleFocusin(t){const{trapElement:e}=this._config;if(t.target===document||t.target===e||e.contains(t.target))return;const i=ye.focusableChildren(e);0===i.length?e.focus():this._lastTabNavDirection===tn?i[i.length-1].focus():i[0].focus()}_handleKeydown(t){"Tab"===t.key&&(this._lastTabNavDirection=t.shiftKey?tn:"forward")}}const on=".fixed-top, .fixed-bottom, .is-fixed, .sticky-top",rn=".sticky-top",an="padding-right",ln="margin-right";class cn{constructor(){this._element=document.body}getWidth(){const t=document.documentElement.clientWidth;return Math.abs(window.innerWidth-t)}hide(){const t=this.getWidth();this._disableOverFlow(),this._setElementAttributes(this._element,an,(e=>e+t)),this._setElementAttributes(on,an,(e=>e+t)),this._setElementAttributes(rn,ln,(e=>e-t))}reset(){this._resetElementAttributes(this._element,"overflow"),this._resetElementAttributes(this._element,an),this._resetElementAttributes(on,an),this._resetElementAttributes(rn,ln)}isOverflowing(){return this.getWidth()>0}_disableOverFlow(){this._saveInitialAttribute(this._element,"overflow"),this._element.style.overflow="hidden"}_setElementAttributes(t,e,i){const n=this.getWidth();this._applyManipulationCallback(t,(t=>{if(t!==this._element&&window.innerWidth>t.clientWidth+n)return;this._saveInitialAttribute(t,e);const s=window.getComputedStyle(t).getPropertyValue(e);t.style.setProperty(e,`${i(Number.parseFloat(s))}px`)}))}_saveInitialAttribute(t,e){const i=t.style.getPropertyValue(e);i&&ge.setDataAttribute(t,e,i)}_resetElementAttributes(t,e){this._applyManipulationCallback(t,(t=>{const i=ge.getDataAttribute(t,e);null!==i?(ge.removeDataAttribute(t,e),t.style.setProperty(e,i)):t.style.removeProperty(e)}))}_applyManipulationCallback(t,e){if(jt(t))e(t);else for(const i of ye.find(t,this._element))e(i)}}const 
hn=".bs.modal",dn=`hide${hn}`,un=`hidePrevented${hn}`,fn=`hidden${hn}`,pn=`show${hn}`,mn=`shown${hn}`,gn=`resize${hn}`,_n=`click.dismiss${hn}`,bn=`mousedown.dismiss${hn}`,vn=`keydown.dismiss${hn}`,yn=`click${hn}.data-api`,wn="modal-open",En="show",An="modal-static",Tn={backdrop:!0,focus:!0,keyboard:!0},Cn={backdrop:"(boolean|string)",focus:"boolean",keyboard:"boolean"};class On extends be{constructor(t,e){super(t,e),this._dialog=ye.findOne(".modal-dialog",this._element),this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._isShown=!1,this._isTransitioning=!1,this._scrollBar=new cn,this._addEventListeners()}static get Default(){return Tn}static get DefaultType(){return Cn}static get NAME(){return"modal"}toggle(t){return this._isShown?this.hide():this.show(t)}show(t){this._isShown||this._isTransitioning||ue.trigger(this._element,pn,{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._isTransitioning=!0,this._scrollBar.hide(),document.body.classList.add(wn),this._adjustDialog(),this._backdrop.show((()=>this._showElement(t))))}hide(){this._isShown&&!this._isTransitioning&&(ue.trigger(this._element,dn).defaultPrevented||(this._isShown=!1,this._isTransitioning=!0,this._focustrap.deactivate(),this._element.classList.remove(En),this._queueCallback((()=>this._hideModal()),this._element,this._isAnimated())))}dispose(){ue.off(window,hn),ue.off(this._dialog,hn),this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}handleUpdate(){this._adjustDialog()}_initializeBackDrop(){return new Ui({isVisible:Boolean(this._config.backdrop),isAnimated:this._isAnimated()})}_initializeFocusTrap(){return new sn({trapElement:this._element})}_showElement(t){document.body.contains(this._element)||document.body.append(this._element),this._element.style.display="block",this._element.removeAttribute("aria-hidden"),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.scrollTop=0;const e=ye.findOne(".modal-body",this._dialog);e&&(e.scrollTop=0),Rt(this._element),this._element.classList.add(En),this._queueCallback((()=>{this._config.focus&&this._focustrap.activate(),this._isTransitioning=!1,ue.trigger(this._element,mn,{relatedTarget:t})}),this._dialog,this._isAnimated())}_addEventListeners(){ue.on(this._element,vn,(t=>{"Escape"===t.key&&(this._config.keyboard?this.hide():this._triggerBackdropTransition())})),ue.on(window,gn,(()=>{this._isShown&&!this._isTransitioning&&this._adjustDialog()})),ue.on(this._element,bn,(t=>{ue.one(this._element,_n,(e=>{this._element===t.target&&this._element===e.target&&("static"!==this._config.backdrop?this._config.backdrop&&this.hide():this._triggerBackdropTransition())}))}))}_hideModal(){this._element.style.display="none",this._element.setAttribute("aria-hidden",!0),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._isTransitioning=!1,this._backdrop.hide((()=>{document.body.classList.remove(wn),this._resetAdjustments(),this._scrollBar.reset(),ue.trigger(this._element,fn)}))}_isAnimated(){return this._element.classList.contains("fade")}_triggerBackdropTransition(){if(ue.trigger(this._element,un).defaultPrevented)return;const 
t=this._element.scrollHeight>document.documentElement.clientHeight,e=this._element.style.overflowY;"hidden"===e||this._element.classList.contains(An)||(t||(this._element.style.overflowY="hidden"),this._element.classList.add(An),this._queueCallback((()=>{this._element.classList.remove(An),this._queueCallback((()=>{this._element.style.overflowY=e}),this._dialog)}),this._dialog),this._element.focus())}_adjustDialog(){const t=this._element.scrollHeight>document.documentElement.clientHeight,e=this._scrollBar.getWidth(),i=e>0;if(i&&!t){const t=Yt()?"paddingLeft":"paddingRight";this._element.style[t]=`${e}px`}if(!i&&t){const t=Yt()?"paddingRight":"paddingLeft";this._element.style[t]=`${e}px`}}_resetAdjustments(){this._element.style.paddingLeft="",this._element.style.paddingRight=""}static jQueryInterface(t,e){return this.each((function(){const i=On.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===i[t])throw new TypeError(`No method named "${t}"`);i[t](e)}}))}}ue.on(document,yn,'[data-bs-toggle="modal"]',(function(t){const e=ye.getElementFromSelector(this);["A","AREA"].includes(this.tagName)&&t.preventDefault(),ue.one(e,pn,(t=>{t.defaultPrevented||ue.one(e,fn,(()=>{Ht(this)&&this.focus()}))}));const i=ye.findOne(".modal.show");i&&On.getInstance(i).hide(),On.getOrCreateInstance(e).toggle(this)})),we(On),Kt(On);const xn=".bs.offcanvas",kn=".data-api",Ln=`load${xn}${kn}`,Sn="show",Dn="showing",$n="hiding",In=".offcanvas.show",Nn=`show${xn}`,Pn=`shown${xn}`,Mn=`hide${xn}`,jn=`hidePrevented${xn}`,Fn=`hidden${xn}`,Hn=`resize${xn}`,Bn=`click${xn}${kn}`,Wn=`keydown.dismiss${xn}`,zn={backdrop:!0,keyboard:!0,scroll:!1},Rn={backdrop:"(boolean|string)",keyboard:"boolean",scroll:"boolean"};class qn extends be{constructor(t,e){super(t,e),this._isShown=!1,this._backdrop=this._initializeBackDrop(),this._focustrap=this._initializeFocusTrap(),this._addEventListeners()}static get Default(){return zn}static get DefaultType(){return Rn}static get NAME(){return"offcanvas"}toggle(t){return this._isShown?this.hide():this.show(t)}show(t){this._isShown||ue.trigger(this._element,Nn,{relatedTarget:t}).defaultPrevented||(this._isShown=!0,this._backdrop.show(),this._config.scroll||(new cn).hide(),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),this._element.classList.add(Dn),this._queueCallback((()=>{this._config.scroll&&!this._config.backdrop||this._focustrap.activate(),this._element.classList.add(Sn),this._element.classList.remove(Dn),ue.trigger(this._element,Pn,{relatedTarget:t})}),this._element,!0))}hide(){this._isShown&&(ue.trigger(this._element,Mn).defaultPrevented||(this._focustrap.deactivate(),this._element.blur(),this._isShown=!1,this._element.classList.add($n),this._backdrop.hide(),this._queueCallback((()=>{this._element.classList.remove(Sn,$n),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._config.scroll||(new cn).reset(),ue.trigger(this._element,Fn)}),this._element,!0)))}dispose(){this._backdrop.dispose(),this._focustrap.deactivate(),super.dispose()}_initializeBackDrop(){const t=Boolean(this._config.backdrop);return new Ui({className:"offcanvas-backdrop",isVisible:t,isAnimated:!0,rootElement:this._element.parentNode,clickCallback:t?()=>{"static"!==this._config.backdrop?this.hide():ue.trigger(this._element,jn)}:null})}_initializeFocusTrap(){return new sn({trapElement:this._element})}_addEventListeners(){ue.on(this._element,Wn,(t=>{"Escape"===t.key&&(this._config.keyboard?this.hide():ue.trigger(this._element,jn))}))}static 
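/* Offcanvas jQuery facade and data-api wiring: click-to-toggle via [data-bs-toggle="offcanvas"], auto-show of .offcanvas.show on page load, and hiding of non-fixed panels on resize; the HTML sanitizer allow-list (Vn) used by tooltip/popover templates follows. */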
jQueryInterface(t){return this.each((function(){const e=qn.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}ue.on(document,Bn,'[data-bs-toggle="offcanvas"]',(function(t){const e=ye.getElementFromSelector(this);if(["A","AREA"].includes(this.tagName)&&t.preventDefault(),Bt(this))return;ue.one(e,Fn,(()=>{Ht(this)&&this.focus()}));const i=ye.findOne(In);i&&i!==e&&qn.getInstance(i).hide(),qn.getOrCreateInstance(e).toggle(this)})),ue.on(window,Ln,(()=>{for(const t of ye.find(In))qn.getOrCreateInstance(t).show()})),ue.on(window,Hn,(()=>{for(const t of ye.find("[aria-modal][class*=show][class*=offcanvas-]"))"fixed"!==getComputedStyle(t).position&&qn.getOrCreateInstance(t).hide()})),we(qn),Kt(qn);const Vn={"*":["class","dir","id","lang","role",/^aria-[\w-]*$/i],a:["target","href","title","rel"],area:[],b:[],br:[],col:[],code:[],div:[],em:[],hr:[],h1:[],h2:[],h3:[],h4:[],h5:[],h6:[],i:[],img:["src","srcset","alt","title","width","height"],li:[],ol:[],p:[],pre:[],s:[],small:[],span:[],sub:[],sup:[],strong:[],u:[],ul:[]},Yn=new Set(["background","cite","href","itemtype","longdesc","poster","src","xlink:href"]),Kn=/^(?!javascript:)(?:[a-z0-9+.-]+:|[^&:/?#]*(?:[/?#]|$))/i,Qn=(t,e)=>{const i=t.nodeName.toLowerCase();return e.includes(i)?!Yn.has(i)||Boolean(Kn.test(t.nodeValue)):e.filter((t=>t instanceof RegExp)).some((t=>t.test(i)))},Xn={allowList:Vn,content:{},extraClass:"",html:!1,sanitize:!0,sanitizeFn:null,template:"<div></div>"},
Un={allowList:"object",content:"object",extraClass:"(string|function)",html:"boolean",sanitize:"boolean",sanitizeFn:"(null|function)",template:"string"},Gn={entry:"(string|element|function|null)",selector:"(string|element)"};class Jn extends _e{constructor(t){super(),this._config=this._getConfig(t)}static get Default(){return Xn}static get DefaultType(){return Un}static get NAME(){return"TemplateFactory"}getContent(){return Object.values(this._config.content).map((t=>this._resolvePossibleFunction(t))).filter(Boolean)}hasContent(){return this.getContent().length>0}changeContent(t){return this._checkContent(t),this._config.content={...this._config.content,...t},this}toHtml(){const t=document.createElement("div");t.innerHTML=this._maybeSanitize(this._config.template);for(const[e,i]of Object.entries(this._config.content))this._setContent(t,i,e);const e=t.children[0],i=this._resolvePossibleFunction(this._config.extraClass);return i&&e.classList.add(...i.split(" ")),e}_typeCheckConfig(t){super._typeCheckConfig(t),this._checkContent(t.content)}_checkContent(t){for(const[e,i]of Object.entries(t))super._typeCheckConfig({selector:e,entry:i},Gn)}_setContent(t,e,i){const n=ye.findOne(i,t);n&&((e=this._resolvePossibleFunction(e))?jt(e)?this._putElementInTemplate(Ft(e),n):this._config.html?n.innerHTML=this._maybeSanitize(e):n.textContent=e:n.remove())}_maybeSanitize(t){return this._config.sanitize?function(t,e,i){if(!t.length)return t;if(i&&"function"==typeof i)return i(t);const n=(new window.DOMParser).parseFromString(t,"text/html"),s=[].concat(...n.body.querySelectorAll("*"));for(const t of s){const i=t.nodeName.toLowerCase();if(!Object.keys(e).includes(i)){t.remove();continue}const n=[].concat(...t.attributes),s=[].concat(e["*"]||[],e[i]||[]);for(const e of n)Qn(e,s)||t.removeAttribute(e.nodeName)}return n.body.innerHTML}(t,this._config.allowList,this._config.sanitizeFn):t}_resolvePossibleFunction(t){return Qt(t,[this])}_putElementInTemplate(t,e){if(this._config.html)return e.innerHTML="",void e.append(t);e.textContent=t.textContent}}const Zn=new Set(["sanitize","allowList","sanitizeFn"]),ts="fade",es="show",is=".modal",ns="hide.bs.modal",ss="hover",os="focus",rs={AUTO:"auto",TOP:"top",RIGHT:Yt()?"left":"right",BOTTOM:"bottom",LEFT:Yt()?"right":"left"},as={allowList:Vn,animation:!0,boundary:"clippingParents",container:!1,customClass:"",delay:0,fallbackPlacements:["top","right","bottom","left"],html:!1,offset:[0,6],placement:"top",popperConfig:null,sanitize:!0,sanitizeFn:null,selector:!1,template:'<div class="tooltip" role="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>',title:"",trigger:"hover focus"},ls={allowList:"object",animation:"boolean",boundary:"(string|element)",container:"(string|element|boolean)",customClass:"(string|function)",delay:"(number|object)",fallbackPlacements:"array",html:"boolean",offset:"(array|string|function)",placement:"(string|function)",popperConfig:"(null|object|function)",sanitize:"boolean",sanitizeFn:"(null|function)",selector:"(string|boolean)",template:"string",title:"(string|element|function)",trigger:"string"};class cs extends be{constructor(t,i){if(void 0===e)throw new TypeError("Bootstrap's tooltips require Popper (https://popper.js.org)");super(t,i),this._isEnabled=!0,this._timeout=0,this._isHovered=null,this._activeTrigger={},this._popper=null,this._templateFactory=null,this._newContent=null,this.tip=null,this._setListeners(),this._config.selector||this._fixTitle()}static get Default(){return as}static get DefaultType(){return ls}static get 
NAME(){return"tooltip"}enable(){this._isEnabled=!0}disable(){this._isEnabled=!1}toggleEnabled(){this._isEnabled=!this._isEnabled}toggle(){this._isEnabled&&(this._activeTrigger.click=!this._activeTrigger.click,this._isShown()?this._leave():this._enter())}dispose(){clearTimeout(this._timeout),ue.off(this._element.closest(is),ns,this._hideModalHandler),this._element.getAttribute("data-bs-original-title")&&this._element.setAttribute("title",this._element.getAttribute("data-bs-original-title")),this._disposePopper(),super.dispose()}show(){if("none"===this._element.style.display)throw new Error("Please use show on visible elements");if(!this._isWithContent()||!this._isEnabled)return;const t=ue.trigger(this._element,this.constructor.eventName("show")),e=(Wt(this._element)||this._element.ownerDocument.documentElement).contains(this._element);if(t.defaultPrevented||!e)return;this._disposePopper();const i=this._getTipElement();this._element.setAttribute("aria-describedby",i.getAttribute("id"));const{container:n}=this._config;if(this._element.ownerDocument.documentElement.contains(this.tip)||(n.append(i),ue.trigger(this._element,this.constructor.eventName("inserted"))),this._popper=this._createPopper(i),i.classList.add(es),"ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))ue.on(t,"mouseover",zt);this._queueCallback((()=>{ue.trigger(this._element,this.constructor.eventName("shown")),!1===this._isHovered&&this._leave(),this._isHovered=!1}),this.tip,this._isAnimated())}hide(){if(this._isShown()&&!ue.trigger(this._element,this.constructor.eventName("hide")).defaultPrevented){if(this._getTipElement().classList.remove(es),"ontouchstart"in document.documentElement)for(const t of[].concat(...document.body.children))ue.off(t,"mouseover",zt);this._activeTrigger.click=!1,this._activeTrigger[os]=!1,this._activeTrigger[ss]=!1,this._isHovered=null,this._queueCallback((()=>{this._isWithActiveTrigger()||(this._isHovered||this._disposePopper(),this._element.removeAttribute("aria-describedby"),ue.trigger(this._element,this.constructor.eventName("hidden")))}),this.tip,this._isAnimated())}}update(){this._popper&&this._popper.update()}_isWithContent(){return Boolean(this._getTitle())}_getTipElement(){return this.tip||(this.tip=this._createTipElement(this._newContent||this._getContentForTemplate())),this.tip}_createTipElement(t){const e=this._getTemplateFactory(t).toHtml();if(!e)return null;e.classList.remove(ts,es),e.classList.add(`bs-${this.constructor.NAME}-auto`);const i=(t=>{do{t+=Math.floor(1e6*Math.random())}while(document.getElementById(t));return t})(this.constructor.NAME).toString();return e.setAttribute("id",i),this._isAnimated()&&e.classList.add(ts),e}setContent(t){this._newContent=t,this._isShown()&&(this._disposePopper(),this.show())}_getTemplateFactory(t){return this._templateFactory?this._templateFactory.changeContent(t):this._templateFactory=new Jn({...this._config,content:t,extraClass:this._resolvePossibleFunction(this._config.customClass)}),this._templateFactory}_getContentForTemplate(){return{".tooltip-inner":this._getTitle()}}_getTitle(){return this._resolvePossibleFunction(this._config.title)||this._element.getAttribute("data-bs-original-title")}_initializeOnDelegatedTarget(t){return this.constructor.getOrCreateInstance(t.delegateTarget,this._getDelegateConfig())}_isAnimated(){return this._config.animation||this.tip&&this.tip.classList.contains(ts)}_isShown(){return this.tip&&this.tip.classList.contains(es)}_createPopper(t){const 
e=Qt(this._config.placement,[this,t,this._element]),i=rs[e.toUpperCase()];return St(this._element,t,this._getPopperConfig(i))}_getOffset(){const{offset:t}=this._config;return"string"==typeof t?t.split(",").map((t=>Number.parseInt(t,10))):"function"==typeof t?e=>t(e,this._element):t}_resolvePossibleFunction(t){return Qt(t,[this._element])}_getPopperConfig(t){const e={placement:t,modifiers:[{name:"flip",options:{fallbackPlacements:this._config.fallbackPlacements}},{name:"offset",options:{offset:this._getOffset()}},{name:"preventOverflow",options:{boundary:this._config.boundary}},{name:"arrow",options:{element:`.${this.constructor.NAME}-arrow`}},{name:"preSetPlacement",enabled:!0,phase:"beforeMain",fn:t=>{this._getTipElement().setAttribute("data-popper-placement",t.state.placement)}}]};return{...e,...Qt(this._config.popperConfig,[e])}}_setListeners(){const t=this._config.trigger.split(" ");for(const e of t)if("click"===e)ue.on(this._element,this.constructor.eventName("click"),this._config.selector,(t=>{this._initializeOnDelegatedTarget(t).toggle()}));else if("manual"!==e){const t=e===ss?this.constructor.eventName("mouseenter"):this.constructor.eventName("focusin"),i=e===ss?this.constructor.eventName("mouseleave"):this.constructor.eventName("focusout");ue.on(this._element,t,this._config.selector,(t=>{const e=this._initializeOnDelegatedTarget(t);e._activeTrigger["focusin"===t.type?os:ss]=!0,e._enter()})),ue.on(this._element,i,this._config.selector,(t=>{const e=this._initializeOnDelegatedTarget(t);e._activeTrigger["focusout"===t.type?os:ss]=e._element.contains(t.relatedTarget),e._leave()}))}this._hideModalHandler=()=>{this._element&&this.hide()},ue.on(this._element.closest(is),ns,this._hideModalHandler)}_fixTitle(){const t=this._element.getAttribute("title");t&&(this._element.getAttribute("aria-label")||this._element.textContent.trim()||this._element.setAttribute("aria-label",t),this._element.setAttribute("data-bs-original-title",t),this._element.removeAttribute("title"))}_enter(){this._isShown()||this._isHovered?this._isHovered=!0:(this._isHovered=!0,this._setTimeout((()=>{this._isHovered&&this.show()}),this._config.delay.show))}_leave(){this._isWithActiveTrigger()||(this._isHovered=!1,this._setTimeout((()=>{this._isHovered||this.hide()}),this._config.delay.hide))}_setTimeout(t,e){clearTimeout(this._timeout),this._timeout=setTimeout(t,e)}_isWithActiveTrigger(){return Object.values(this._activeTrigger).includes(!0)}_getConfig(t){const e=ge.getDataAttributes(this._element);for(const t of Object.keys(e))Zn.has(t)&&delete e[t];return t={...e,..."object"==typeof t&&t?t:{}},t=this._mergeConfigObj(t),t=this._configAfterMerge(t),this._typeCheckConfig(t),t}_configAfterMerge(t){return t.container=!1===t.container?document.body:Ft(t.container),"number"==typeof t.delay&&(t.delay={show:t.delay,hide:t.delay}),"number"==typeof t.title&&(t.title=t.title.toString()),"number"==typeof t.content&&(t.content=t.content.toString()),t}_getDelegateConfig(){const t={};for(const[e,i]of Object.entries(this._config))this.constructor.Default[e]!==i&&(t[e]=i);return t.selector=!1,t.trigger="manual",t}_disposePopper(){this._popper&&(this._popper.destroy(),this._popper=null),this.tip&&(this.tip.remove(),this.tip=null)}static jQueryInterface(t){return this.each((function(){const e=cs.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}Kt(cs);const 
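/* Popover (class us) extends Tooltip, adding a content option and a header/body template; ScrollSpy (class As), driven by an IntersectionObserver, begins further down this line. */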
hs={...cs.Default,content:"",offset:[0,8],placement:"right",template:'',trigger:"click"},ds={...cs.DefaultType,content:"(null|string|element|function)"};class us extends cs{static get Default(){return hs}static get DefaultType(){return ds}static get NAME(){return"popover"}_isWithContent(){return this._getTitle()||this._getContent()}_getContentForTemplate(){return{".popover-header":this._getTitle(),".popover-body":this._getContent()}}_getContent(){return this._resolvePossibleFunction(this._config.content)}static jQueryInterface(t){return this.each((function(){const e=us.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t]()}}))}}Kt(us);const fs=".bs.scrollspy",ps=`activate${fs}`,ms=`click${fs}`,gs=`load${fs}.data-api`,_s="active",bs="[href]",vs=".nav-link",ys=`${vs}, .nav-item > ${vs}, .list-group-item`,ws={offset:null,rootMargin:"0px 0px -25%",smoothScroll:!1,target:null,threshold:[.1,.5,1]},Es={offset:"(number|null)",rootMargin:"string",smoothScroll:"boolean",target:"element",threshold:"array"};class As extends be{constructor(t,e){super(t,e),this._targetLinks=new Map,this._observableSections=new Map,this._rootElement="visible"===getComputedStyle(this._element).overflowY?null:this._element,this._activeTarget=null,this._observer=null,this._previousScrollData={visibleEntryTop:0,parentScrollTop:0},this.refresh()}static get Default(){return ws}static get DefaultType(){return Es}static get NAME(){return"scrollspy"}refresh(){this._initializeTargetsAndObservables(),this._maybeEnableSmoothScroll(),this._observer?this._observer.disconnect():this._observer=this._getNewObserver();for(const t of this._observableSections.values())this._observer.observe(t)}dispose(){this._observer.disconnect(),super.dispose()}_configAfterMerge(t){return t.target=Ft(t.target)||document.body,t.rootMargin=t.offset?`${t.offset}px 0px -30%`:t.rootMargin,"string"==typeof t.threshold&&(t.threshold=t.threshold.split(",").map((t=>Number.parseFloat(t)))),t}_maybeEnableSmoothScroll(){this._config.smoothScroll&&(ue.off(this._config.target,ms),ue.on(this._config.target,ms,bs,(t=>{const e=this._observableSections.get(t.target.hash);if(e){t.preventDefault();const i=this._rootElement||window,n=e.offsetTop-this._element.offsetTop;if(i.scrollTo)return void i.scrollTo({top:n,behavior:"smooth"});i.scrollTop=n}})))}_getNewObserver(){const t={root:this._rootElement,threshold:this._config.threshold,rootMargin:this._config.rootMargin};return new IntersectionObserver((t=>this._observerCallback(t)),t)}_observerCallback(t){const e=t=>this._targetLinks.get(`#${t.target.id}`),i=t=>{this._previousScrollData.visibleEntryTop=t.target.offsetTop,this._process(e(t))},n=(this._rootElement||document.documentElement).scrollTop,s=n>=this._previousScrollData.parentScrollTop;this._previousScrollData.parentScrollTop=n;for(const o of t){if(!o.isIntersecting){this._activeTarget=null,this._clearActiveClass(e(o));continue}const t=o.target.offsetTop>=this._previousScrollData.visibleEntryTop;if(s&&t){if(i(o),!n)return}else s||t||i(o)}}_initializeTargetsAndObservables(){this._targetLinks=new Map,this._observableSections=new Map;const t=ye.find(bs,this._config.target);for(const e of t){if(!e.hash||Bt(e))continue;const 
t=ye.findOne(decodeURI(e.hash),this._element);Ht(t)&&(this._targetLinks.set(decodeURI(e.hash),e),this._observableSections.set(e.hash,t))}}_process(t){this._activeTarget!==t&&(this._clearActiveClass(this._config.target),this._activeTarget=t,t.classList.add(_s),this._activateParents(t),ue.trigger(this._element,ps,{relatedTarget:t}))}_activateParents(t){if(t.classList.contains("dropdown-item"))ye.findOne(".dropdown-toggle",t.closest(".dropdown")).classList.add(_s);else for(const e of ye.parents(t,".nav, .list-group"))for(const t of ye.prev(e,ys))t.classList.add(_s)}_clearActiveClass(t){t.classList.remove(_s);const e=ye.find(`${bs}.${_s}`,t);for(const t of e)t.classList.remove(_s)}static jQueryInterface(t){return this.each((function(){const e=As.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}))}}ue.on(window,gs,(()=>{for(const t of ye.find('[data-bs-spy="scroll"]'))As.getOrCreateInstance(t)})),Kt(As);const Ts=".bs.tab",Cs=`hide${Ts}`,Os=`hidden${Ts}`,xs=`show${Ts}`,ks=`shown${Ts}`,Ls=`click${Ts}`,Ss=`keydown${Ts}`,Ds=`load${Ts}`,$s="ArrowLeft",Is="ArrowRight",Ns="ArrowUp",Ps="ArrowDown",Ms="Home",js="End",Fs="active",Hs="fade",Bs="show",Ws=".dropdown-toggle",zs=`:not(${Ws})`,Rs='[data-bs-toggle="tab"], [data-bs-toggle="pill"], [data-bs-toggle="list"]',qs=`.nav-link${zs}, .list-group-item${zs}, [role="tab"]${zs}, ${Rs}`,Vs=`.${Fs}[data-bs-toggle="tab"], .${Fs}[data-bs-toggle="pill"], .${Fs}[data-bs-toggle="list"]`;class Ys extends be{constructor(t){super(t),this._parent=this._element.closest('.list-group, .nav, [role="tablist"]'),this._parent&&(this._setInitialAttributes(this._parent,this._getChildren()),ue.on(this._element,Ss,(t=>this._keydown(t))))}static get NAME(){return"tab"}show(){const t=this._element;if(this._elemIsActive(t))return;const e=this._getActiveElem(),i=e?ue.trigger(e,Cs,{relatedTarget:t}):null;ue.trigger(t,xs,{relatedTarget:e}).defaultPrevented||i&&i.defaultPrevented||(this._deactivate(e,t),this._activate(t,e))}_activate(t,e){t&&(t.classList.add(Fs),this._activate(ye.getElementFromSelector(t)),this._queueCallback((()=>{"tab"===t.getAttribute("role")?(t.removeAttribute("tabindex"),t.setAttribute("aria-selected",!0),this._toggleDropDown(t,!0),ue.trigger(t,ks,{relatedTarget:e})):t.classList.add(Bs)}),t,t.classList.contains(Hs)))}_deactivate(t,e){t&&(t.classList.remove(Fs),t.blur(),this._deactivate(ye.getElementFromSelector(t)),this._queueCallback((()=>{"tab"===t.getAttribute("role")?(t.setAttribute("aria-selected",!1),t.setAttribute("tabindex","-1"),this._toggleDropDown(t,!1),ue.trigger(t,Os,{relatedTarget:e})):t.classList.remove(Bs)}),t,t.classList.contains(Hs)))}_keydown(t){if(![$s,Is,Ns,Ps,Ms,js].includes(t.key))return;t.stopPropagation(),t.preventDefault();const e=this._getChildren().filter((t=>!Bt(t)));let i;if([Ms,js].includes(t.key))i=e[t.key===Ms?0:e.length-1];else{const n=[Is,Ps].includes(t.key);i=Ut(e,t.target,n,!0)}i&&(i.focus({preventScroll:!0}),Ys.getOrCreateInstance(i).show())}_getChildren(){return ye.find(qs,this._parent)}_getActiveElem(){return this._getChildren().find((t=>this._elemIsActive(t)))||null}_setInitialAttributes(t,e){this._setAttributeIfNotExists(t,"role","tablist");for(const t of e)this._setInitialAttributesOnChild(t)}_setInitialAttributesOnChild(t){t=this._getInnerElement(t);const 
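/* Tab continuation: ARIA bootstrapping (role="tab"/"tabpanel", aria-selected, tabindex), dropdown toggling, and the click/keydown data-api handlers; Toast (class ao) follows with its autohide timer and hover/focus interaction tracking. */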
e=this._elemIsActive(t),i=this._getOuterElement(t);t.setAttribute("aria-selected",e),i!==t&&this._setAttributeIfNotExists(i,"role","presentation"),e||t.setAttribute("tabindex","-1"),this._setAttributeIfNotExists(t,"role","tab"),this._setInitialAttributesOnTargetPanel(t)}_setInitialAttributesOnTargetPanel(t){const e=ye.getElementFromSelector(t);e&&(this._setAttributeIfNotExists(e,"role","tabpanel"),t.id&&this._setAttributeIfNotExists(e,"aria-labelledby",`${t.id}`))}_toggleDropDown(t,e){const i=this._getOuterElement(t);if(!i.classList.contains("dropdown"))return;const n=(t,n)=>{const s=ye.findOne(t,i);s&&s.classList.toggle(n,e)};n(Ws,Fs),n(".dropdown-menu",Bs),i.setAttribute("aria-expanded",e)}_setAttributeIfNotExists(t,e,i){t.hasAttribute(e)||t.setAttribute(e,i)}_elemIsActive(t){return t.classList.contains(Fs)}_getInnerElement(t){return t.matches(qs)?t:ye.findOne(qs,t)}_getOuterElement(t){return t.closest(".nav-item, .list-group-item")||t}static jQueryInterface(t){return this.each((function(){const e=Ys.getOrCreateInstance(this);if("string"==typeof t){if(void 0===e[t]||t.startsWith("_")||"constructor"===t)throw new TypeError(`No method named "${t}"`);e[t]()}}))}}ue.on(document,Ls,Rs,(function(t){["A","AREA"].includes(this.tagName)&&t.preventDefault(),Bt(this)||Ys.getOrCreateInstance(this).show()})),ue.on(window,Ds,(()=>{for(const t of ye.find(Vs))Ys.getOrCreateInstance(t)})),Kt(Ys);const Ks=".bs.toast",Qs=`mouseover${Ks}`,Xs=`mouseout${Ks}`,Us=`focusin${Ks}`,Gs=`focusout${Ks}`,Js=`hide${Ks}`,Zs=`hidden${Ks}`,to=`show${Ks}`,eo=`shown${Ks}`,io="hide",no="show",so="showing",oo={animation:"boolean",autohide:"boolean",delay:"number"},ro={animation:!0,autohide:!0,delay:5e3};class ao extends be{constructor(t,e){super(t,e),this._timeout=null,this._hasMouseInteraction=!1,this._hasKeyboardInteraction=!1,this._setListeners()}static get Default(){return ro}static get DefaultType(){return oo}static get NAME(){return"toast"}show(){ue.trigger(this._element,to).defaultPrevented||(this._clearTimeout(),this._config.animation&&this._element.classList.add("fade"),this._element.classList.remove(io),Rt(this._element),this._element.classList.add(no,so),this._queueCallback((()=>{this._element.classList.remove(so),ue.trigger(this._element,eo),this._maybeScheduleHide()}),this._element,this._config.animation))}hide(){this.isShown()&&(ue.trigger(this._element,Js).defaultPrevented||(this._element.classList.add(so),this._queueCallback((()=>{this._element.classList.add(io),this._element.classList.remove(so,no),ue.trigger(this._element,Zs)}),this._element,this._config.animation)))}dispose(){this._clearTimeout(),this.isShown()&&this._element.classList.remove(no),super.dispose()}isShown(){return this._element.classList.contains(no)}_maybeScheduleHide(){this._config.autohide&&(this._hasMouseInteraction||this._hasKeyboardInteraction||(this._timeout=setTimeout((()=>{this.hide()}),this._config.delay)))}_onInteraction(t,e){switch(t.type){case"mouseover":case"mouseout":this._hasMouseInteraction=e;break;case"focusin":case"focusout":this._hasKeyboardInteraction=e}if(e)return void this._clearTimeout();const i=t.relatedTarget;this._element===i||this._element.contains(i)||this._maybeScheduleHide()}_setListeners(){ue.on(this._element,Qs,(t=>this._onInteraction(t,!0))),ue.on(this._element,Xs,(t=>this._onInteraction(t,!1))),ue.on(this._element,Us,(t=>this._onInteraction(t,!0))),ue.on(this._element,Gs,(t=>this._onInteraction(t,!1)))}_clearTimeout(){clearTimeout(this._timeout),this._timeout=null}static jQueryInterface(t){return 
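/* Toast jQuery facade, then theme bootstrapping on DOM-ready (helper lo): tooltip activation for [data-bs-toggle="tooltip"] and the #pst-back-to-top button, shown only while scrolling up below the header's bottom edge. */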
this.each((function(){const e=ao.getOrCreateInstance(this,t);if("string"==typeof t){if(void 0===e[t])throw new TypeError(`No method named "${t}"`);e[t](this)}}))}}function lo(t){"loading"!=document.readyState?t():document.addEventListener("DOMContentLoaded",t)}we(ao),Kt(ao),lo((function(){[].slice.call(document.querySelectorAll('[data-bs-toggle="tooltip"]')).map((function(t){return new cs(t,{delay:{show:500,hide:100}})}))})),lo((function(){document.getElementById("pst-back-to-top").addEventListener("click",(function(){document.body.scrollTop=0,document.documentElement.scrollTop=0}))})),lo((function(){var t=document.getElementById("pst-back-to-top"),e=document.getElementsByClassName("bd-header")[0].getBoundingClientRect();window.addEventListener("scroll",(function(){this.oldScroll>this.scrollY&&this.scrollY>e.bottom?t.style.display="block":t.style.display="none",this.oldScroll=this.scrollY}))}))})(); +//# sourceMappingURL=bootstrap.js.map \ No newline at end of file diff --git a/_static/scripts/bootstrap.js.LICENSE.txt b/_static/scripts/bootstrap.js.LICENSE.txt new file mode 100644 index 0000000000..10f979d078 --- /dev/null +++ b/_static/scripts/bootstrap.js.LICENSE.txt @@ -0,0 +1,5 @@ +/*! + * Bootstrap v5.3.2 (https://getbootstrap.com/) + * Copyright 2011-2023 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors) + * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE) + */ diff --git a/_static/scripts/bootstrap.js.map b/_static/scripts/bootstrap.js.map new file mode 100644 index 0000000000..e5bc15752d --- /dev/null +++ b/_static/scripts/bootstrap.js.map @@ -0,0 +1 @@ +{"version":3,"file":"scripts/bootstrap.js","mappings":";mBACA,IAAIA,EAAsB,CCA1BA,EAAwB,CAACC,EAASC,KACjC,IAAI,IAAIC,KAAOD,EACXF,EAAoBI,EAAEF,EAAYC,KAASH,EAAoBI,EAAEH,EAASE,IAC5EE,OAAOC,eAAeL,EAASE,EAAK,CAAEI,YAAY,EAAMC,IAAKN,EAAWC,IAE1E,ECNDH,EAAwB,CAACS,EAAKC,IAAUL,OAAOM,UAAUC,eAAeC,KAAKJ,EAAKC,GCClFV,EAAyBC,IACH,oBAAXa,QAA0BA,OAAOC,aAC1CV,OAAOC,eAAeL,EAASa,OAAOC,YAAa,CAAEC,MAAO,WAE7DX,OAAOC,eAAeL,EAAS,aAAc,CAAEe,OAAO,GAAO,ipBCLvD,IAAI,EAAM,MACNC,EAAS,SACTC,EAAQ,QACRC,EAAO,OACPC,EAAO,OACPC,EAAiB,CAAC,EAAKJ,EAAQC,EAAOC,GACtCG,EAAQ,QACRC,EAAM,MACNC,EAAkB,kBAClBC,EAAW,WACXC,EAAS,SACTC,EAAY,YACZC,EAAmCP,EAAeQ,QAAO,SAAUC,EAAKC,GACjF,OAAOD,EAAIE,OAAO,CAACD,EAAY,IAAMT,EAAOS,EAAY,IAAMR,GAChE,GAAG,IACQ,EAA0B,GAAGS,OAAOX,EAAgB,CAACD,IAAOS,QAAO,SAAUC,EAAKC,GAC3F,OAAOD,EAAIE,OAAO,CAACD,EAAWA,EAAY,IAAMT,EAAOS,EAAY,IAAMR,GAC3E,GAAG,IAEQU,EAAa,aACbC,EAAO,OACPC,EAAY,YAEZC,EAAa,aACbC,EAAO,OACPC,EAAY,YAEZC,EAAc,cACdC,EAAQ,QACRC,EAAa,aACbC,EAAiB,CAACT,EAAYC,EAAMC,EAAWC,EAAYC,EAAMC,EAAWC,EAAaC,EAAOC,GC9B5F,SAASE,EAAYC,GAClC,OAAOA,GAAWA,EAAQC,UAAY,IAAIC,cAAgB,IAC5D,CCFe,SAASC,EAAUC,GAChC,GAAY,MAARA,EACF,OAAOC,OAGT,GAAwB,oBAApBD,EAAKE,WAAkC,CACzC,IAAIC,EAAgBH,EAAKG,cACzB,OAAOA,GAAgBA,EAAcC,aAAwBH,MAC/D,CAEA,OAAOD,CACT,CCTA,SAASK,EAAUL,GAEjB,OAAOA,aADUD,EAAUC,GAAMM,SACIN,aAAgBM,OACvD,CAEA,SAASC,EAAcP,GAErB,OAAOA,aADUD,EAAUC,GAAMQ,aACIR,aAAgBQ,WACvD,CAEA,SAASC,EAAaT,GAEpB,MAA0B,oBAAfU,aAKJV,aADUD,EAAUC,GAAMU,YACIV,aAAgBU,WACvD,CCwDA,SACEC,KAAM,cACNC,SAAS,EACTC,MAAO,QACPC,GA5EF,SAAqBC,GACnB,IAAIC,EAAQD,EAAKC,MACjB3D,OAAO4D,KAAKD,EAAME,UAAUC,SAAQ,SAAUR,GAC5C,IAAIS,EAAQJ,EAAMK,OAAOV,IAAS,CAAC,EAC/BW,EAAaN,EAAMM,WAAWX,IAAS,CAAC,EACxCf,EAAUoB,EAAME,SAASP,GAExBJ,EAAcX,IAAaD,EAAYC,KAO5CvC,OAAOkE,OAAO3B,EAAQwB,MAAOA,GAC7B/D,OAAO4D,KAAKK,GAAYH,SAAQ,SAAUR,GACxC,IAAI3C,EAAQsD,EAAWX,IAET,IAAV3C,EACF4B,EAAQ4B,gBAAgBb,GAExBf,EAAQ6B,aAAad,GAAgB,IAAV3C,EAAiB,GAAKA,EAErD,IACF,GACF,EAoDE0D,OAlDF,SAAgBC,GACd,IAAIX,EAAQW,EA
AMX,MACdY,EAAgB,CAClBlD,OAAQ,CACNmD,SAAUb,EAAMc,QAAQC,SACxB5D,KAAM,IACN6D,IAAK,IACLC,OAAQ,KAEVC,MAAO,CACLL,SAAU,YAEZlD,UAAW,CAAC,GASd,OAPAtB,OAAOkE,OAAOP,EAAME,SAASxC,OAAO0C,MAAOQ,EAAclD,QACzDsC,EAAMK,OAASO,EAEXZ,EAAME,SAASgB,OACjB7E,OAAOkE,OAAOP,EAAME,SAASgB,MAAMd,MAAOQ,EAAcM,OAGnD,WACL7E,OAAO4D,KAAKD,EAAME,UAAUC,SAAQ,SAAUR,GAC5C,IAAIf,EAAUoB,EAAME,SAASP,GACzBW,EAAaN,EAAMM,WAAWX,IAAS,CAAC,EAGxCS,EAFkB/D,OAAO4D,KAAKD,EAAMK,OAAOzD,eAAe+C,GAAQK,EAAMK,OAAOV,GAAQiB,EAAcjB,IAE7E9B,QAAO,SAAUuC,EAAOe,GAElD,OADAf,EAAMe,GAAY,GACXf,CACT,GAAG,CAAC,GAECb,EAAcX,IAAaD,EAAYC,KAI5CvC,OAAOkE,OAAO3B,EAAQwB,MAAOA,GAC7B/D,OAAO4D,KAAKK,GAAYH,SAAQ,SAAUiB,GACxCxC,EAAQ4B,gBAAgBY,EAC1B,IACF,GACF,CACF,EASEC,SAAU,CAAC,kBCjFE,SAASC,EAAiBvD,GACvC,OAAOA,EAAUwD,MAAM,KAAK,EAC9B,CCHO,IAAI,EAAMC,KAAKC,IACX,EAAMD,KAAKE,IACXC,EAAQH,KAAKG,MCFT,SAASC,IACtB,IAAIC,EAASC,UAAUC,cAEvB,OAAc,MAAVF,GAAkBA,EAAOG,QAAUC,MAAMC,QAAQL,EAAOG,QACnDH,EAAOG,OAAOG,KAAI,SAAUC,GACjC,OAAOA,EAAKC,MAAQ,IAAMD,EAAKE,OACjC,IAAGC,KAAK,KAGHT,UAAUU,SACnB,CCTe,SAASC,IACtB,OAAQ,iCAAiCC,KAAKd,IAChD,CCCe,SAASe,EAAsB/D,EAASgE,EAAcC,QAC9C,IAAjBD,IACFA,GAAe,QAGO,IAApBC,IACFA,GAAkB,GAGpB,IAAIC,EAAalE,EAAQ+D,wBACrBI,EAAS,EACTC,EAAS,EAETJ,GAAgBrD,EAAcX,KAChCmE,EAASnE,EAAQqE,YAAc,GAAItB,EAAMmB,EAAWI,OAAStE,EAAQqE,aAAmB,EACxFD,EAASpE,EAAQuE,aAAe,GAAIxB,EAAMmB,EAAWM,QAAUxE,EAAQuE,cAAoB,GAG7F,IACIE,GADOhE,EAAUT,GAAWG,EAAUH,GAAWK,QAC3BoE,eAEtBC,GAAoBb,KAAsBI,EAC1CU,GAAKT,EAAW3F,MAAQmG,GAAoBD,EAAiBA,EAAeG,WAAa,IAAMT,EAC/FU,GAAKX,EAAW9B,KAAOsC,GAAoBD,EAAiBA,EAAeK,UAAY,IAAMV,EAC7FE,EAAQJ,EAAWI,MAAQH,EAC3BK,EAASN,EAAWM,OAASJ,EACjC,MAAO,CACLE,MAAOA,EACPE,OAAQA,EACRpC,IAAKyC,EACLvG,MAAOqG,EAAIL,EACXjG,OAAQwG,EAAIL,EACZjG,KAAMoG,EACNA,EAAGA,EACHE,EAAGA,EAEP,CCrCe,SAASE,EAAc/E,GACpC,IAAIkE,EAAaH,EAAsB/D,GAGnCsE,EAAQtE,EAAQqE,YAChBG,EAASxE,EAAQuE,aAUrB,OARI3B,KAAKoC,IAAId,EAAWI,MAAQA,IAAU,IACxCA,EAAQJ,EAAWI,OAGjB1B,KAAKoC,IAAId,EAAWM,OAASA,IAAW,IAC1CA,EAASN,EAAWM,QAGf,CACLG,EAAG3E,EAAQ4E,WACXC,EAAG7E,EAAQ8E,UACXR,MAAOA,EACPE,OAAQA,EAEZ,CCvBe,SAASS,EAASC,EAAQC,GACvC,IAAIC,EAAWD,EAAME,aAAeF,EAAME,cAE1C,GAAIH,EAAOD,SAASE,GAClB,OAAO,EAEJ,GAAIC,GAAYvE,EAAauE,GAAW,CACzC,IAAIE,EAAOH,EAEX,EAAG,CACD,GAAIG,GAAQJ,EAAOK,WAAWD,GAC5B,OAAO,EAITA,EAAOA,EAAKE,YAAcF,EAAKG,IACjC,OAASH,EACX,CAGF,OAAO,CACT,CCrBe,SAAS,EAAiBtF,GACvC,OAAOG,EAAUH,GAAS0F,iBAAiB1F,EAC7C,CCFe,SAAS2F,EAAe3F,GACrC,MAAO,CAAC,QAAS,KAAM,MAAM4F,QAAQ7F,EAAYC,KAAa,CAChE,CCFe,SAAS6F,EAAmB7F,GAEzC,QAASS,EAAUT,GAAWA,EAAQO,cACtCP,EAAQ8F,WAAazF,OAAOyF,UAAUC,eACxC,CCFe,SAASC,EAAchG,GACpC,MAA6B,SAAzBD,EAAYC,GACPA,EAMPA,EAAQiG,cACRjG,EAAQwF,aACR3E,EAAab,GAAWA,EAAQyF,KAAO,OAEvCI,EAAmB7F,EAGvB,CCVA,SAASkG,EAAoBlG,GAC3B,OAAKW,EAAcX,IACoB,UAAvC,EAAiBA,GAASiC,SAInBjC,EAAQmG,aAHN,IAIX,CAwCe,SAASC,EAAgBpG,GAItC,IAHA,IAAIK,EAASF,EAAUH,GACnBmG,EAAeD,EAAoBlG,GAEhCmG,GAAgBR,EAAeQ,IAA6D,WAA5C,EAAiBA,GAAclE,UACpFkE,EAAeD,EAAoBC,GAGrC,OAAIA,IAA+C,SAA9BpG,EAAYoG,IAA0D,SAA9BpG,EAAYoG,IAAwE,WAA5C,EAAiBA,GAAclE,UAC3H5B,EAGF8F,GAhDT,SAA4BnG,GAC1B,IAAIqG,EAAY,WAAWvC,KAAKd,KAGhC,GAFW,WAAWc,KAAKd,MAEfrC,EAAcX,IAII,UAFX,EAAiBA,GAEnBiC,SACb,OAAO,KAIX,IAAIqE,EAAcN,EAAchG,GAMhC,IAJIa,EAAayF,KACfA,EAAcA,EAAYb,MAGrB9E,EAAc2F,IAAgB,CAAC,OAAQ,QAAQV,QAAQ7F,EAAYuG,IAAgB,GAAG,CAC3F,IAAIC,EAAM,EAAiBD,GAI3B,GAAsB,SAAlBC,EAAIC,WAA4C,SAApBD,EAAIE,aAA0C,UAAhBF,EAAIG,UAAiF,IAA1D,CAAC,YAAa,eAAed,QAAQW,EAAII,aAAsBN,GAAgC,WAAnBE,EAAII,YAA2BN,GAAaE,EAAIK,QAAyB,SAAfL,EAAIK,OACjO,OAAON,EAEPA,EAAcA,EAAYd,UAE9B,CAEA,OAAO,IACT,CAgByBqB,CAAmB7G,IAAYK,CACxD,CCpEe,SAASyG,EAAyB3H,GAC/C,MAAO,CAAC,MAAO,UAAUyG,QAAQzG,IAAc,EAAI,IAAM,GAC3D,CCDO,SAAS4H,EAAOjE,EAAK1E,EAAOyE,GACjC,OAAO,EAAQC,EAAK,EA
AQ1E,EAAOyE,GACrC,CCFe,SAASmE,EAAmBC,GACzC,OAAOxJ,OAAOkE,OAAO,CAAC,ECDf,CACLS,IAAK,EACL9D,MAAO,EACPD,OAAQ,EACRE,KAAM,GDHuC0I,EACjD,CEHe,SAASC,EAAgB9I,EAAOiD,GAC7C,OAAOA,EAAKpC,QAAO,SAAUkI,EAAS5J,GAEpC,OADA4J,EAAQ5J,GAAOa,EACR+I,CACT,GAAG,CAAC,EACN,CC4EA,SACEpG,KAAM,QACNC,SAAS,EACTC,MAAO,OACPC,GApEF,SAAeC,GACb,IAAIiG,EAEAhG,EAAQD,EAAKC,MACbL,EAAOI,EAAKJ,KACZmB,EAAUf,EAAKe,QACfmF,EAAejG,EAAME,SAASgB,MAC9BgF,EAAgBlG,EAAMmG,cAAcD,cACpCE,EAAgB9E,EAAiBtB,EAAMjC,WACvCsI,EAAOX,EAAyBU,GAEhCE,EADa,CAACnJ,EAAMD,GAAOsH,QAAQ4B,IAAkB,EAClC,SAAW,QAElC,GAAKH,GAAiBC,EAAtB,CAIA,IAAIL,EAxBgB,SAAyBU,EAASvG,GAItD,OAAO4F,EAAsC,iBAH7CW,EAA6B,mBAAZA,EAAyBA,EAAQlK,OAAOkE,OAAO,CAAC,EAAGP,EAAMwG,MAAO,CAC/EzI,UAAWiC,EAAMjC,aACbwI,GACkDA,EAAUT,EAAgBS,EAASlJ,GAC7F,CAmBsBoJ,CAAgB3F,EAAQyF,QAASvG,GACjD0G,EAAY/C,EAAcsC,GAC1BU,EAAmB,MAATN,EAAe,EAAMlJ,EAC/ByJ,EAAmB,MAATP,EAAepJ,EAASC,EAClC2J,EAAU7G,EAAMwG,MAAM7I,UAAU2I,GAAOtG,EAAMwG,MAAM7I,UAAU0I,GAAQH,EAAcG,GAAQrG,EAAMwG,MAAM9I,OAAO4I,GAC9GQ,EAAYZ,EAAcG,GAAQrG,EAAMwG,MAAM7I,UAAU0I,GACxDU,EAAoB/B,EAAgBiB,GACpCe,EAAaD,EAA6B,MAATV,EAAeU,EAAkBE,cAAgB,EAAIF,EAAkBG,aAAe,EAAI,EAC3HC,EAAoBN,EAAU,EAAIC,EAAY,EAG9CpF,EAAMmE,EAAcc,GACpBlF,EAAMuF,EAAaN,EAAUJ,GAAOT,EAAce,GAClDQ,EAASJ,EAAa,EAAIN,EAAUJ,GAAO,EAAIa,EAC/CE,EAAS1B,EAAOjE,EAAK0F,EAAQ3F,GAE7B6F,EAAWjB,EACfrG,EAAMmG,cAAcxG,KAASqG,EAAwB,CAAC,GAAyBsB,GAAYD,EAAQrB,EAAsBuB,aAAeF,EAASD,EAAQpB,EAnBzJ,CAoBF,EAkCEtF,OAhCF,SAAgBC,GACd,IAAIX,EAAQW,EAAMX,MAEdwH,EADU7G,EAAMG,QACWlC,QAC3BqH,OAAoC,IAArBuB,EAA8B,sBAAwBA,EAErD,MAAhBvB,IAKwB,iBAAjBA,IACTA,EAAejG,EAAME,SAASxC,OAAO+J,cAAcxB,MAOhDpC,EAAS7D,EAAME,SAASxC,OAAQuI,KAIrCjG,EAAME,SAASgB,MAAQ+E,EACzB,EASE5E,SAAU,CAAC,iBACXqG,iBAAkB,CAAC,oBCxFN,SAASC,EAAa5J,GACnC,OAAOA,EAAUwD,MAAM,KAAK,EAC9B,CCOA,IAAIqG,EAAa,CACf5G,IAAK,OACL9D,MAAO,OACPD,OAAQ,OACRE,KAAM,QAeD,SAAS0K,GAAYlH,GAC1B,IAAImH,EAEApK,EAASiD,EAAMjD,OACfqK,EAAapH,EAAMoH,WACnBhK,EAAY4C,EAAM5C,UAClBiK,EAAYrH,EAAMqH,UAClBC,EAAUtH,EAAMsH,QAChBpH,EAAWF,EAAME,SACjBqH,EAAkBvH,EAAMuH,gBACxBC,EAAWxH,EAAMwH,SACjBC,EAAezH,EAAMyH,aACrBC,EAAU1H,EAAM0H,QAChBC,EAAaL,EAAQ1E,EACrBA,OAAmB,IAAf+E,EAAwB,EAAIA,EAChCC,EAAaN,EAAQxE,EACrBA,OAAmB,IAAf8E,EAAwB,EAAIA,EAEhCC,EAAgC,mBAAjBJ,EAA8BA,EAAa,CAC5D7E,EAAGA,EACHE,IACG,CACHF,EAAGA,EACHE,GAGFF,EAAIiF,EAAMjF,EACVE,EAAI+E,EAAM/E,EACV,IAAIgF,EAAOR,EAAQrL,eAAe,KAC9B8L,EAAOT,EAAQrL,eAAe,KAC9B+L,EAAQxL,EACRyL,EAAQ,EACRC,EAAM5J,OAEV,GAAIkJ,EAAU,CACZ,IAAIpD,EAAeC,EAAgBtH,GAC/BoL,EAAa,eACbC,EAAY,cAEZhE,IAAiBhG,EAAUrB,IAGmB,WAA5C,EAFJqH,EAAeN,EAAmB/G,IAECmD,UAAsC,aAAbA,IAC1DiI,EAAa,eACbC,EAAY,gBAOZhL,IAAc,IAAQA,IAAcZ,GAAQY,IAAcb,IAAU8K,IAAczK,KACpFqL,EAAQ3L,EAGRwG,IAFc4E,GAAWtD,IAAiB8D,GAAOA,EAAIxF,eAAiBwF,EAAIxF,eAAeD,OACzF2B,EAAa+D,IACEf,EAAW3E,OAC1BK,GAAKyE,EAAkB,GAAK,GAG1BnK,IAAcZ,IAASY,IAAc,GAAOA,IAAcd,GAAW+K,IAAczK,KACrFoL,EAAQzL,EAGRqG,IAFc8E,GAAWtD,IAAiB8D,GAAOA,EAAIxF,eAAiBwF,EAAIxF,eAAeH,MACzF6B,EAAagE,IACEhB,EAAW7E,MAC1BK,GAAK2E,EAAkB,GAAK,EAEhC,CAEA,IAgBMc,EAhBFC,EAAe5M,OAAOkE,OAAO,CAC/BM,SAAUA,GACTsH,GAAYP,GAEXsB,GAAyB,IAAjBd,EAlFd,SAA2BrI,EAAM8I,GAC/B,IAAItF,EAAIxD,EAAKwD,EACTE,EAAI1D,EAAK0D,EACT0F,EAAMN,EAAIO,kBAAoB,EAClC,MAAO,CACL7F,EAAG5B,EAAM4B,EAAI4F,GAAOA,GAAO,EAC3B1F,EAAG9B,EAAM8B,EAAI0F,GAAOA,GAAO,EAE/B,CA0EsCE,CAAkB,CACpD9F,EAAGA,EACHE,GACC1E,EAAUrB,IAAW,CACtB6F,EAAGA,EACHE,GAMF,OAHAF,EAAI2F,EAAM3F,EACVE,EAAIyF,EAAMzF,EAENyE,EAGK7L,OAAOkE,OAAO,CAAC,EAAG0I,IAAeD,EAAiB,CAAC,GAAkBJ,GAASF,EAAO,IAAM,GAAIM,EAAeL,GAASF,EAAO,IAAM,GAAIO,EAAe5D,WAAayD,EAAIO,kBAAoB,IAAM,EAAI,aAAe7F,EAAI,OAASE,EAAI,MAAQ,eAAiBF,EAAI,OAASE,EAAI,SAAUuF,IAG5R3M,OAAOkE,OAAO,CAAC,EAAG0I,IAAenB,EAAkB,CAAC,GAAmBc,GAASF,EAAOjF,EAAI,
KAAO,GAAIqE,EAAgBa,GAASF,EAAOlF,EAAI,KAAO,GAAIuE,EAAgB1C,UAAY,GAAI0C,GAC9L,CA4CA,UACEnI,KAAM,gBACNC,SAAS,EACTC,MAAO,cACPC,GA9CF,SAAuBwJ,GACrB,IAAItJ,EAAQsJ,EAAMtJ,MACdc,EAAUwI,EAAMxI,QAChByI,EAAwBzI,EAAQoH,gBAChCA,OAA4C,IAA1BqB,GAA0CA,EAC5DC,EAAoB1I,EAAQqH,SAC5BA,OAAiC,IAAtBqB,GAAsCA,EACjDC,EAAwB3I,EAAQsH,aAChCA,OAAyC,IAA1BqB,GAA0CA,EACzDR,EAAe,CACjBlL,UAAWuD,EAAiBtB,EAAMjC,WAClCiK,UAAWL,EAAa3H,EAAMjC,WAC9BL,OAAQsC,EAAME,SAASxC,OACvBqK,WAAY/H,EAAMwG,MAAM9I,OACxBwK,gBAAiBA,EACjBG,QAAoC,UAA3BrI,EAAMc,QAAQC,UAGgB,MAArCf,EAAMmG,cAAcD,gBACtBlG,EAAMK,OAAO3C,OAASrB,OAAOkE,OAAO,CAAC,EAAGP,EAAMK,OAAO3C,OAAQmK,GAAYxL,OAAOkE,OAAO,CAAC,EAAG0I,EAAc,CACvGhB,QAASjI,EAAMmG,cAAcD,cAC7BrF,SAAUb,EAAMc,QAAQC,SACxBoH,SAAUA,EACVC,aAAcA,OAIe,MAA7BpI,EAAMmG,cAAcjF,QACtBlB,EAAMK,OAAOa,MAAQ7E,OAAOkE,OAAO,CAAC,EAAGP,EAAMK,OAAOa,MAAO2G,GAAYxL,OAAOkE,OAAO,CAAC,EAAG0I,EAAc,CACrGhB,QAASjI,EAAMmG,cAAcjF,MAC7BL,SAAU,WACVsH,UAAU,EACVC,aAAcA,OAIlBpI,EAAMM,WAAW5C,OAASrB,OAAOkE,OAAO,CAAC,EAAGP,EAAMM,WAAW5C,OAAQ,CACnE,wBAAyBsC,EAAMjC,WAEnC,EAQE2L,KAAM,CAAC,GCrKT,IAAIC,GAAU,CACZA,SAAS,GAsCX,UACEhK,KAAM,iBACNC,SAAS,EACTC,MAAO,QACPC,GAAI,WAAe,EACnBY,OAxCF,SAAgBX,GACd,IAAIC,EAAQD,EAAKC,MACb4J,EAAW7J,EAAK6J,SAChB9I,EAAUf,EAAKe,QACf+I,EAAkB/I,EAAQgJ,OAC1BA,OAA6B,IAApBD,GAAoCA,EAC7CE,EAAkBjJ,EAAQkJ,OAC1BA,OAA6B,IAApBD,GAAoCA,EAC7C9K,EAASF,EAAUiB,EAAME,SAASxC,QAClCuM,EAAgB,GAAGjM,OAAOgC,EAAMiK,cAActM,UAAWqC,EAAMiK,cAAcvM,QAYjF,OAVIoM,GACFG,EAAc9J,SAAQ,SAAU+J,GAC9BA,EAAaC,iBAAiB,SAAUP,EAASQ,OAAQT,GAC3D,IAGEK,GACF/K,EAAOkL,iBAAiB,SAAUP,EAASQ,OAAQT,IAG9C,WACDG,GACFG,EAAc9J,SAAQ,SAAU+J,GAC9BA,EAAaG,oBAAoB,SAAUT,EAASQ,OAAQT,GAC9D,IAGEK,GACF/K,EAAOoL,oBAAoB,SAAUT,EAASQ,OAAQT,GAE1D,CACF,EASED,KAAM,CAAC,GC/CT,IAAIY,GAAO,CACTnN,KAAM,QACND,MAAO,OACPD,OAAQ,MACR+D,IAAK,UAEQ,SAASuJ,GAAqBxM,GAC3C,OAAOA,EAAUyM,QAAQ,0BAA0B,SAAUC,GAC3D,OAAOH,GAAKG,EACd,GACF,CCVA,IAAI,GAAO,CACTnN,MAAO,MACPC,IAAK,SAEQ,SAASmN,GAA8B3M,GACpD,OAAOA,EAAUyM,QAAQ,cAAc,SAAUC,GAC/C,OAAO,GAAKA,EACd,GACF,CCPe,SAASE,GAAgB3L,GACtC,IAAI6J,EAAM9J,EAAUC,GAGpB,MAAO,CACL4L,WAHe/B,EAAIgC,YAInBC,UAHcjC,EAAIkC,YAKtB,CCNe,SAASC,GAAoBpM,GAQ1C,OAAO+D,EAAsB8B,EAAmB7F,IAAUzB,KAAOwN,GAAgB/L,GAASgM,UAC5F,CCXe,SAASK,GAAerM,GAErC,IAAIsM,EAAoB,EAAiBtM,GACrCuM,EAAWD,EAAkBC,SAC7BC,EAAYF,EAAkBE,UAC9BC,EAAYH,EAAkBG,UAElC,MAAO,6BAA6B3I,KAAKyI,EAAWE,EAAYD,EAClE,CCLe,SAASE,GAAgBtM,GACtC,MAAI,CAAC,OAAQ,OAAQ,aAAawF,QAAQ7F,EAAYK,KAAU,EAEvDA,EAAKG,cAAcoM,KAGxBhM,EAAcP,IAASiM,GAAejM,GACjCA,EAGFsM,GAAgB1G,EAAc5F,GACvC,CCJe,SAASwM,GAAkB5M,EAAS6M,GACjD,IAAIC,OAES,IAATD,IACFA,EAAO,IAGT,IAAIvB,EAAeoB,GAAgB1M,GAC/B+M,EAASzB,KAAqE,OAAlDwB,EAAwB9M,EAAQO,oBAAyB,EAASuM,EAAsBH,MACpH1C,EAAM9J,EAAUmL,GAChB0B,EAASD,EAAS,CAAC9C,GAAK7K,OAAO6K,EAAIxF,gBAAkB,GAAI4H,GAAef,GAAgBA,EAAe,IAAMA,EAC7G2B,EAAcJ,EAAKzN,OAAO4N,GAC9B,OAAOD,EAASE,EAChBA,EAAY7N,OAAOwN,GAAkB5G,EAAcgH,IACrD,CCzBe,SAASE,GAAiBC,GACvC,OAAO1P,OAAOkE,OAAO,CAAC,EAAGwL,EAAM,CAC7B5O,KAAM4O,EAAKxI,EACXvC,IAAK+K,EAAKtI,EACVvG,MAAO6O,EAAKxI,EAAIwI,EAAK7I,MACrBjG,OAAQ8O,EAAKtI,EAAIsI,EAAK3I,QAE1B,CCqBA,SAAS4I,GAA2BpN,EAASqN,EAAgBlL,GAC3D,OAAOkL,IAAmBxO,EAAWqO,GCzBxB,SAAyBlN,EAASmC,GAC/C,IAAI8H,EAAM9J,EAAUH,GAChBsN,EAAOzH,EAAmB7F,GAC1ByE,EAAiBwF,EAAIxF,eACrBH,EAAQgJ,EAAKhF,YACb9D,EAAS8I,EAAKjF,aACd1D,EAAI,EACJE,EAAI,EAER,GAAIJ,EAAgB,CAClBH,EAAQG,EAAeH,MACvBE,EAASC,EAAeD,OACxB,IAAI+I,EAAiB1J,KAEjB0J,IAAmBA,GAA+B,UAAbpL,KACvCwC,EAAIF,EAAeG,WACnBC,EAAIJ,EAAeK,UAEvB,CAEA,MAAO,CACLR,MAAOA,EACPE,OAAQA,EACRG,EAAGA,EAAIyH,GAAoBpM,GAC3B6E,EAAGA,EAEP,CDDwD2I,CAAgBxN,EAASmC,IAAa1B,EAAU4M,GAdxG,SAAoCrN,EAASmC,GAC3C,IAAIgL,EAAOpJ,EAAsB/D,GAAS,EAAoB,UAAbmC,GASjD,OARAgL,EAAK/K,IAAM+K,EAAK/K
,IAAMpC,EAAQyN,UAC9BN,EAAK5O,KAAO4O,EAAK5O,KAAOyB,EAAQ0N,WAChCP,EAAK9O,OAAS8O,EAAK/K,IAAMpC,EAAQqI,aACjC8E,EAAK7O,MAAQ6O,EAAK5O,KAAOyB,EAAQsI,YACjC6E,EAAK7I,MAAQtE,EAAQsI,YACrB6E,EAAK3I,OAASxE,EAAQqI,aACtB8E,EAAKxI,EAAIwI,EAAK5O,KACd4O,EAAKtI,EAAIsI,EAAK/K,IACP+K,CACT,CAG0HQ,CAA2BN,EAAgBlL,GAAY+K,GEtBlK,SAAyBlN,GACtC,IAAI8M,EAEAQ,EAAOzH,EAAmB7F,GAC1B4N,EAAY7B,GAAgB/L,GAC5B2M,EAA0D,OAAlDG,EAAwB9M,EAAQO,oBAAyB,EAASuM,EAAsBH,KAChGrI,EAAQ,EAAIgJ,EAAKO,YAAaP,EAAKhF,YAAaqE,EAAOA,EAAKkB,YAAc,EAAGlB,EAAOA,EAAKrE,YAAc,GACvG9D,EAAS,EAAI8I,EAAKQ,aAAcR,EAAKjF,aAAcsE,EAAOA,EAAKmB,aAAe,EAAGnB,EAAOA,EAAKtE,aAAe,GAC5G1D,GAAKiJ,EAAU5B,WAAaI,GAAoBpM,GAChD6E,GAAK+I,EAAU1B,UAMnB,MAJiD,QAA7C,EAAiBS,GAAQW,GAAMS,YACjCpJ,GAAK,EAAI2I,EAAKhF,YAAaqE,EAAOA,EAAKrE,YAAc,GAAKhE,GAGrD,CACLA,MAAOA,EACPE,OAAQA,EACRG,EAAGA,EACHE,EAAGA,EAEP,CFCkMmJ,CAAgBnI,EAAmB7F,IACrO,CG1Be,SAASiO,GAAe9M,GACrC,IAOIkI,EAPAtK,EAAYoC,EAAKpC,UACjBiB,EAAUmB,EAAKnB,QACfb,EAAYgC,EAAKhC,UACjBqI,EAAgBrI,EAAYuD,EAAiBvD,GAAa,KAC1DiK,EAAYjK,EAAY4J,EAAa5J,GAAa,KAClD+O,EAAUnP,EAAU4F,EAAI5F,EAAUuF,MAAQ,EAAItE,EAAQsE,MAAQ,EAC9D6J,EAAUpP,EAAU8F,EAAI9F,EAAUyF,OAAS,EAAIxE,EAAQwE,OAAS,EAGpE,OAAQgD,GACN,KAAK,EACH6B,EAAU,CACR1E,EAAGuJ,EACHrJ,EAAG9F,EAAU8F,EAAI7E,EAAQwE,QAE3B,MAEF,KAAKnG,EACHgL,EAAU,CACR1E,EAAGuJ,EACHrJ,EAAG9F,EAAU8F,EAAI9F,EAAUyF,QAE7B,MAEF,KAAKlG,EACH+K,EAAU,CACR1E,EAAG5F,EAAU4F,EAAI5F,EAAUuF,MAC3BO,EAAGsJ,GAEL,MAEF,KAAK5P,EACH8K,EAAU,CACR1E,EAAG5F,EAAU4F,EAAI3E,EAAQsE,MACzBO,EAAGsJ,GAEL,MAEF,QACE9E,EAAU,CACR1E,EAAG5F,EAAU4F,EACbE,EAAG9F,EAAU8F,GAInB,IAAIuJ,EAAW5G,EAAgBV,EAAyBU,GAAiB,KAEzE,GAAgB,MAAZ4G,EAAkB,CACpB,IAAI1G,EAAmB,MAAb0G,EAAmB,SAAW,QAExC,OAAQhF,GACN,KAAK1K,EACH2K,EAAQ+E,GAAY/E,EAAQ+E,IAAarP,EAAU2I,GAAO,EAAI1H,EAAQ0H,GAAO,GAC7E,MAEF,KAAK/I,EACH0K,EAAQ+E,GAAY/E,EAAQ+E,IAAarP,EAAU2I,GAAO,EAAI1H,EAAQ0H,GAAO,GAKnF,CAEA,OAAO2B,CACT,CC3De,SAASgF,GAAejN,EAAOc,QAC5B,IAAZA,IACFA,EAAU,CAAC,GAGb,IAAIoM,EAAWpM,EACXqM,EAAqBD,EAASnP,UAC9BA,OAAmC,IAAvBoP,EAAgCnN,EAAMjC,UAAYoP,EAC9DC,EAAoBF,EAASnM,SAC7BA,OAAiC,IAAtBqM,EAA+BpN,EAAMe,SAAWqM,EAC3DC,EAAoBH,EAASI,SAC7BA,OAAiC,IAAtBD,EAA+B7P,EAAkB6P,EAC5DE,EAAwBL,EAASM,aACjCA,OAAyC,IAA1BD,EAAmC9P,EAAW8P,EAC7DE,EAAwBP,EAASQ,eACjCA,OAA2C,IAA1BD,EAAmC/P,EAAS+P,EAC7DE,EAAuBT,EAASU,YAChCA,OAAuC,IAAzBD,GAA0CA,EACxDE,EAAmBX,EAAS3G,QAC5BA,OAA+B,IAArBsH,EAA8B,EAAIA,EAC5ChI,EAAgBD,EAAsC,iBAAZW,EAAuBA,EAAUT,EAAgBS,EAASlJ,IACpGyQ,EAAaJ,IAAmBhQ,EAASC,EAAYD,EACrDqK,EAAa/H,EAAMwG,MAAM9I,OACzBkB,EAAUoB,EAAME,SAAS0N,EAAcE,EAAaJ,GACpDK,EJkBS,SAAyBnP,EAAS0O,EAAUE,EAAczM,GACvE,IAAIiN,EAAmC,oBAAbV,EAlB5B,SAA4B1O,GAC1B,IAAIpB,EAAkBgO,GAAkB5G,EAAchG,IAElDqP,EADoB,CAAC,WAAY,SAASzJ,QAAQ,EAAiB5F,GAASiC,WAAa,GACnDtB,EAAcX,GAAWoG,EAAgBpG,GAAWA,EAE9F,OAAKS,EAAU4O,GAKRzQ,EAAgBgI,QAAO,SAAUyG,GACtC,OAAO5M,EAAU4M,IAAmBpI,EAASoI,EAAgBgC,IAAmD,SAAhCtP,EAAYsN,EAC9F,IANS,EAOX,CAK6DiC,CAAmBtP,GAAW,GAAGZ,OAAOsP,GAC/F9P,EAAkB,GAAGQ,OAAOgQ,EAAqB,CAACR,IAClDW,EAAsB3Q,EAAgB,GACtC4Q,EAAe5Q,EAAgBK,QAAO,SAAUwQ,EAASpC,GAC3D,IAAIF,EAAOC,GAA2BpN,EAASqN,EAAgBlL,GAK/D,OAJAsN,EAAQrN,IAAM,EAAI+K,EAAK/K,IAAKqN,EAAQrN,KACpCqN,EAAQnR,MAAQ,EAAI6O,EAAK7O,MAAOmR,EAAQnR,OACxCmR,EAAQpR,OAAS,EAAI8O,EAAK9O,OAAQoR,EAAQpR,QAC1CoR,EAAQlR,KAAO,EAAI4O,EAAK5O,KAAMkR,EAAQlR,MAC/BkR,CACT,GAAGrC,GAA2BpN,EAASuP,EAAqBpN,IAK5D,OAJAqN,EAAalL,MAAQkL,EAAalR,MAAQkR,EAAajR,KACvDiR,EAAahL,OAASgL,EAAanR,OAASmR,EAAapN,IACzDoN,EAAa7K,EAAI6K,EAAajR,KAC9BiR,EAAa3K,EAAI2K,EAAapN,IACvBoN,CACT,CInC2BE,CAAgBjP,EAAUT,GAAWA,EAAUA,EAAQ2P,gBAAkB9J,EAAmBzE,EAAME,SAASxC,QAAS4P,EAAUE,EAAczM,GACjKyN,EAAsB7L,EAAsB3C,EAAME,SAASvC,WAC3DuI,EAAgB2G,GAAe,CACjClP,UAAW6Q,EACX5P,QAASmJ,EACThH,SAAU,WACVhD,UAAW
A,IAET0Q,EAAmB3C,GAAiBzP,OAAOkE,OAAO,CAAC,EAAGwH,EAAY7B,IAClEwI,EAAoBhB,IAAmBhQ,EAAS+Q,EAAmBD,EAGnEG,EAAkB,CACpB3N,IAAK+M,EAAmB/M,IAAM0N,EAAkB1N,IAAM6E,EAAc7E,IACpE/D,OAAQyR,EAAkBzR,OAAS8Q,EAAmB9Q,OAAS4I,EAAc5I,OAC7EE,KAAM4Q,EAAmB5Q,KAAOuR,EAAkBvR,KAAO0I,EAAc1I,KACvED,MAAOwR,EAAkBxR,MAAQ6Q,EAAmB7Q,MAAQ2I,EAAc3I,OAExE0R,EAAa5O,EAAMmG,cAAckB,OAErC,GAAIqG,IAAmBhQ,GAAUkR,EAAY,CAC3C,IAAIvH,EAASuH,EAAW7Q,GACxB1B,OAAO4D,KAAK0O,GAAiBxO,SAAQ,SAAUhE,GAC7C,IAAI0S,EAAW,CAAC3R,EAAOD,GAAQuH,QAAQrI,IAAQ,EAAI,GAAK,EACpDkK,EAAO,CAAC,EAAKpJ,GAAQuH,QAAQrI,IAAQ,EAAI,IAAM,IACnDwS,EAAgBxS,IAAQkL,EAAOhB,GAAQwI,CACzC,GACF,CAEA,OAAOF,CACT,CCyEA,UACEhP,KAAM,OACNC,SAAS,EACTC,MAAO,OACPC,GA5HF,SAAcC,GACZ,IAAIC,EAAQD,EAAKC,MACbc,EAAUf,EAAKe,QACfnB,EAAOI,EAAKJ,KAEhB,IAAIK,EAAMmG,cAAcxG,GAAMmP,MAA9B,CAoCA,IAhCA,IAAIC,EAAoBjO,EAAQkM,SAC5BgC,OAAsC,IAAtBD,GAAsCA,EACtDE,EAAmBnO,EAAQoO,QAC3BC,OAAoC,IAArBF,GAAqCA,EACpDG,EAA8BtO,EAAQuO,mBACtC9I,EAAUzF,EAAQyF,QAClB+G,EAAWxM,EAAQwM,SACnBE,EAAe1M,EAAQ0M,aACvBI,EAAc9M,EAAQ8M,YACtB0B,EAAwBxO,EAAQyO,eAChCA,OAA2C,IAA1BD,GAA0CA,EAC3DE,EAAwB1O,EAAQ0O,sBAChCC,EAAqBzP,EAAMc,QAAQ/C,UACnCqI,EAAgB9E,EAAiBmO,GAEjCJ,EAAqBD,IADHhJ,IAAkBqJ,GACqCF,EAjC/E,SAAuCxR,GACrC,GAAIuD,EAAiBvD,KAAeX,EAClC,MAAO,GAGT,IAAIsS,EAAoBnF,GAAqBxM,GAC7C,MAAO,CAAC2M,GAA8B3M,GAAY2R,EAAmBhF,GAA8BgF,GACrG,CA0B6IC,CAA8BF,GAA3E,CAAClF,GAAqBkF,KAChHG,EAAa,CAACH,GAAoBzR,OAAOqR,GAAoBxR,QAAO,SAAUC,EAAKC,GACrF,OAAOD,EAAIE,OAAOsD,EAAiBvD,KAAeX,ECvCvC,SAA8B4C,EAAOc,QAClC,IAAZA,IACFA,EAAU,CAAC,GAGb,IAAIoM,EAAWpM,EACX/C,EAAYmP,EAASnP,UACrBuP,EAAWJ,EAASI,SACpBE,EAAeN,EAASM,aACxBjH,EAAU2G,EAAS3G,QACnBgJ,EAAiBrC,EAASqC,eAC1BM,EAAwB3C,EAASsC,sBACjCA,OAAkD,IAA1BK,EAAmC,EAAgBA,EAC3E7H,EAAYL,EAAa5J,GACzB6R,EAAa5H,EAAYuH,EAAiB3R,EAAsBA,EAAoB4H,QAAO,SAAUzH,GACvG,OAAO4J,EAAa5J,KAAeiK,CACrC,IAAK3K,EACDyS,EAAoBF,EAAWpK,QAAO,SAAUzH,GAClD,OAAOyR,EAAsBhL,QAAQzG,IAAc,CACrD,IAEiC,IAA7B+R,EAAkBC,SACpBD,EAAoBF,GAItB,IAAII,EAAYF,EAAkBjS,QAAO,SAAUC,EAAKC,GAOtD,OANAD,EAAIC,GAAakP,GAAejN,EAAO,CACrCjC,UAAWA,EACXuP,SAAUA,EACVE,aAAcA,EACdjH,QAASA,IACRjF,EAAiBvD,IACbD,CACT,GAAG,CAAC,GACJ,OAAOzB,OAAO4D,KAAK+P,GAAWC,MAAK,SAAUC,EAAGC,GAC9C,OAAOH,EAAUE,GAAKF,EAAUG,EAClC,GACF,CDC6DC,CAAqBpQ,EAAO,CACnFjC,UAAWA,EACXuP,SAAUA,EACVE,aAAcA,EACdjH,QAASA,EACTgJ,eAAgBA,EAChBC,sBAAuBA,IACpBzR,EACP,GAAG,IACCsS,EAAgBrQ,EAAMwG,MAAM7I,UAC5BoK,EAAa/H,EAAMwG,MAAM9I,OACzB4S,EAAY,IAAIC,IAChBC,GAAqB,EACrBC,EAAwBb,EAAW,GAE9Bc,EAAI,EAAGA,EAAId,EAAWG,OAAQW,IAAK,CAC1C,IAAI3S,EAAY6R,EAAWc,GAEvBC,EAAiBrP,EAAiBvD,GAElC6S,EAAmBjJ,EAAa5J,KAAeT,EAC/CuT,EAAa,CAAC,EAAK5T,GAAQuH,QAAQmM,IAAmB,EACtDrK,EAAMuK,EAAa,QAAU,SAC7B1F,EAAW8B,GAAejN,EAAO,CACnCjC,UAAWA,EACXuP,SAAUA,EACVE,aAAcA,EACdI,YAAaA,EACbrH,QAASA,IAEPuK,EAAoBD,EAAaD,EAAmB1T,EAAQC,EAAOyT,EAAmB3T,EAAS,EAE/FoT,EAAc/J,GAAOyB,EAAWzB,KAClCwK,EAAoBvG,GAAqBuG,IAG3C,IAAIC,EAAmBxG,GAAqBuG,GACxCE,EAAS,GAUb,GARIhC,GACFgC,EAAOC,KAAK9F,EAASwF,IAAmB,GAGtCxB,GACF6B,EAAOC,KAAK9F,EAAS2F,IAAsB,EAAG3F,EAAS4F,IAAqB,GAG1EC,EAAOE,OAAM,SAAUC,GACzB,OAAOA,CACT,IAAI,CACFV,EAAwB1S,EACxByS,GAAqB,EACrB,KACF,CAEAF,EAAUc,IAAIrT,EAAWiT,EAC3B,CAEA,GAAIR,EAqBF,IAnBA,IAEIa,EAAQ,SAAeC,GACzB,IAAIC,EAAmB3B,EAAW4B,MAAK,SAAUzT,GAC/C,IAAIiT,EAASV,EAAU9T,IAAIuB,GAE3B,GAAIiT,EACF,OAAOA,EAAOS,MAAM,EAAGH,GAAIJ,OAAM,SAAUC,GACzC,OAAOA,CACT,GAEJ,IAEA,GAAII,EAEF,OADAd,EAAwBc,EACjB,OAEX,EAESD,EAnBY/B,EAAiB,EAAI,EAmBZ+B,EAAK,GAGpB,UAFFD,EAAMC,GADmBA,KAOpCtR,EAAMjC,YAAc0S,IACtBzQ,EAAMmG,cAAcxG,GAAMmP,OAAQ,EAClC9O,EAAMjC,UAAY0S,EAClBzQ,EAAM0R,OAAQ,EA5GhB,CA8GF,EAQEhK,iBAAkB,CAAC,UACnBgC,KAAM,CACJoF,OAAO,IE7IX,SAAS6C,GAAexG,EAAUY,EAAM6F,GAQtC,YAPyB,IAArBA,IACFA,EAAmB,CACjBrO,EAAG,EACHE,EAAG
,IAIA,CACLzC,IAAKmK,EAASnK,IAAM+K,EAAK3I,OAASwO,EAAiBnO,EACnDvG,MAAOiO,EAASjO,MAAQ6O,EAAK7I,MAAQ0O,EAAiBrO,EACtDtG,OAAQkO,EAASlO,OAAS8O,EAAK3I,OAASwO,EAAiBnO,EACzDtG,KAAMgO,EAAShO,KAAO4O,EAAK7I,MAAQ0O,EAAiBrO,EAExD,CAEA,SAASsO,GAAsB1G,GAC7B,MAAO,CAAC,EAAKjO,EAAOD,EAAQE,GAAM2U,MAAK,SAAUC,GAC/C,OAAO5G,EAAS4G,IAAS,CAC3B,GACF,CA+BA,UACEpS,KAAM,OACNC,SAAS,EACTC,MAAO,OACP6H,iBAAkB,CAAC,mBACnB5H,GAlCF,SAAcC,GACZ,IAAIC,EAAQD,EAAKC,MACbL,EAAOI,EAAKJ,KACZ0Q,EAAgBrQ,EAAMwG,MAAM7I,UAC5BoK,EAAa/H,EAAMwG,MAAM9I,OACzBkU,EAAmB5R,EAAMmG,cAAc6L,gBACvCC,EAAoBhF,GAAejN,EAAO,CAC5C0N,eAAgB,cAEdwE,EAAoBjF,GAAejN,EAAO,CAC5C4N,aAAa,IAEXuE,EAA2BR,GAAeM,EAAmB5B,GAC7D+B,EAAsBT,GAAeO,EAAmBnK,EAAY6J,GACpES,EAAoBR,GAAsBM,GAC1CG,EAAmBT,GAAsBO,GAC7CpS,EAAMmG,cAAcxG,GAAQ,CAC1BwS,yBAA0BA,EAC1BC,oBAAqBA,EACrBC,kBAAmBA,EACnBC,iBAAkBA,GAEpBtS,EAAMM,WAAW5C,OAASrB,OAAOkE,OAAO,CAAC,EAAGP,EAAMM,WAAW5C,OAAQ,CACnE,+BAAgC2U,EAChC,sBAAuBC,GAE3B,GCJA,IACE3S,KAAM,SACNC,SAAS,EACTC,MAAO,OACPwB,SAAU,CAAC,iBACXvB,GA5BF,SAAgBa,GACd,IAAIX,EAAQW,EAAMX,MACdc,EAAUH,EAAMG,QAChBnB,EAAOgB,EAAMhB,KACb4S,EAAkBzR,EAAQuG,OAC1BA,OAA6B,IAApBkL,EAA6B,CAAC,EAAG,GAAKA,EAC/C7I,EAAO,EAAW7L,QAAO,SAAUC,EAAKC,GAE1C,OADAD,EAAIC,GA5BD,SAAiCA,EAAWyI,EAAOa,GACxD,IAAIjB,EAAgB9E,EAAiBvD,GACjCyU,EAAiB,CAACrV,EAAM,GAAKqH,QAAQ4B,IAAkB,GAAK,EAAI,EAEhErG,EAAyB,mBAAXsH,EAAwBA,EAAOhL,OAAOkE,OAAO,CAAC,EAAGiG,EAAO,CACxEzI,UAAWA,KACPsJ,EACFoL,EAAW1S,EAAK,GAChB2S,EAAW3S,EAAK,GAIpB,OAFA0S,EAAWA,GAAY,EACvBC,GAAYA,GAAY,GAAKF,EACtB,CAACrV,EAAMD,GAAOsH,QAAQ4B,IAAkB,EAAI,CACjD7C,EAAGmP,EACHjP,EAAGgP,GACD,CACFlP,EAAGkP,EACHhP,EAAGiP,EAEP,CASqBC,CAAwB5U,EAAWiC,EAAMwG,MAAOa,GAC1DvJ,CACT,GAAG,CAAC,GACA8U,EAAwBlJ,EAAK1J,EAAMjC,WACnCwF,EAAIqP,EAAsBrP,EAC1BE,EAAImP,EAAsBnP,EAEW,MAArCzD,EAAMmG,cAAcD,gBACtBlG,EAAMmG,cAAcD,cAAc3C,GAAKA,EACvCvD,EAAMmG,cAAcD,cAAczC,GAAKA,GAGzCzD,EAAMmG,cAAcxG,GAAQ+J,CAC9B,GC1BA,IACE/J,KAAM,gBACNC,SAAS,EACTC,MAAO,OACPC,GApBF,SAAuBC,GACrB,IAAIC,EAAQD,EAAKC,MACbL,EAAOI,EAAKJ,KAKhBK,EAAMmG,cAAcxG,GAAQkN,GAAe,CACzClP,UAAWqC,EAAMwG,MAAM7I,UACvBiB,QAASoB,EAAMwG,MAAM9I,OACrBqD,SAAU,WACVhD,UAAWiC,EAAMjC,WAErB,EAQE2L,KAAM,CAAC,GCgHT,IACE/J,KAAM,kBACNC,SAAS,EACTC,MAAO,OACPC,GA/HF,SAAyBC,GACvB,IAAIC,EAAQD,EAAKC,MACbc,EAAUf,EAAKe,QACfnB,EAAOI,EAAKJ,KACZoP,EAAoBjO,EAAQkM,SAC5BgC,OAAsC,IAAtBD,GAAsCA,EACtDE,EAAmBnO,EAAQoO,QAC3BC,OAAoC,IAArBF,GAAsCA,EACrD3B,EAAWxM,EAAQwM,SACnBE,EAAe1M,EAAQ0M,aACvBI,EAAc9M,EAAQ8M,YACtBrH,EAAUzF,EAAQyF,QAClBsM,EAAkB/R,EAAQgS,OAC1BA,OAA6B,IAApBD,GAAoCA,EAC7CE,EAAwBjS,EAAQkS,aAChCA,OAAyC,IAA1BD,EAAmC,EAAIA,EACtD5H,EAAW8B,GAAejN,EAAO,CACnCsN,SAAUA,EACVE,aAAcA,EACdjH,QAASA,EACTqH,YAAaA,IAEXxH,EAAgB9E,EAAiBtB,EAAMjC,WACvCiK,EAAYL,EAAa3H,EAAMjC,WAC/BkV,GAAmBjL,EACnBgF,EAAWtH,EAAyBU,GACpC8I,ECrCY,MDqCSlC,ECrCH,IAAM,IDsCxB9G,EAAgBlG,EAAMmG,cAAcD,cACpCmK,EAAgBrQ,EAAMwG,MAAM7I,UAC5BoK,EAAa/H,EAAMwG,MAAM9I,OACzBwV,EAA4C,mBAAjBF,EAA8BA,EAAa3W,OAAOkE,OAAO,CAAC,EAAGP,EAAMwG,MAAO,CACvGzI,UAAWiC,EAAMjC,aACbiV,EACFG,EAA2D,iBAAtBD,EAAiC,CACxElG,SAAUkG,EACVhE,QAASgE,GACP7W,OAAOkE,OAAO,CAChByM,SAAU,EACVkC,QAAS,GACRgE,GACCE,EAAsBpT,EAAMmG,cAAckB,OAASrH,EAAMmG,cAAckB,OAAOrH,EAAMjC,WAAa,KACjG2L,EAAO,CACTnG,EAAG,EACHE,EAAG,GAGL,GAAKyC,EAAL,CAIA,GAAI8I,EAAe,CACjB,IAAIqE,EAEAC,EAAwB,MAAbtG,EAAmB,EAAM7P,EACpCoW,EAAuB,MAAbvG,EAAmB/P,EAASC,EACtCoJ,EAAmB,MAAb0G,EAAmB,SAAW,QACpC3F,EAASnB,EAAc8G,GACvBtL,EAAM2F,EAAS8D,EAASmI,GACxB7R,EAAM4F,EAAS8D,EAASoI,GACxBC,EAAWV,GAAU/K,EAAWzB,GAAO,EAAI,EAC3CmN,EAASzL,IAAc1K,EAAQ+S,EAAc/J,GAAOyB,EAAWzB,GAC/DoN,EAAS1L,IAAc1K,GAASyK,EAAWzB,IAAQ+J,EAAc/J,GAGjEL,EAAejG,EAAME,SAASgB,MAC9BwF,EAAYoM,GAAU7M,EAAetC,EAAcsC,GAAgB,CACrE/C,MAAO,EACPE,OAAQ,GA
ENuQ,GAAqB3T,EAAMmG,cAAc,oBAAsBnG,EAAMmG,cAAc,oBAAoBI,QxBhFtG,CACLvF,IAAK,EACL9D,MAAO,EACPD,OAAQ,EACRE,KAAM,GwB6EFyW,GAAkBD,GAAmBL,GACrCO,GAAkBF,GAAmBJ,GAMrCO,GAAWnO,EAAO,EAAG0K,EAAc/J,GAAMI,EAAUJ,IACnDyN,GAAYd,EAAkB5C,EAAc/J,GAAO,EAAIkN,EAAWM,GAAWF,GAAkBT,EAA4BnG,SAAWyG,EAASK,GAAWF,GAAkBT,EAA4BnG,SACxMgH,GAAYf,GAAmB5C,EAAc/J,GAAO,EAAIkN,EAAWM,GAAWD,GAAkBV,EAA4BnG,SAAW0G,EAASI,GAAWD,GAAkBV,EAA4BnG,SACzMjG,GAAoB/G,EAAME,SAASgB,OAAS8D,EAAgBhF,EAAME,SAASgB,OAC3E+S,GAAelN,GAAiC,MAAbiG,EAAmBjG,GAAkBsF,WAAa,EAAItF,GAAkBuF,YAAc,EAAI,EAC7H4H,GAAwH,OAAjGb,EAA+C,MAAvBD,OAA8B,EAASA,EAAoBpG,IAAqBqG,EAAwB,EAEvJc,GAAY9M,EAAS2M,GAAYE,GACjCE,GAAkBzO,EAAOmN,EAAS,EAAQpR,EAF9B2F,EAAS0M,GAAYG,GAAsBD,IAEKvS,EAAK2F,EAAQyL,EAAS,EAAQrR,EAAK0S,IAAa1S,GAChHyE,EAAc8G,GAAYoH,GAC1B1K,EAAKsD,GAAYoH,GAAkB/M,CACrC,CAEA,GAAI8H,EAAc,CAChB,IAAIkF,GAEAC,GAAyB,MAAbtH,EAAmB,EAAM7P,EAErCoX,GAAwB,MAAbvH,EAAmB/P,EAASC,EAEvCsX,GAAUtO,EAAcgJ,GAExBuF,GAAmB,MAAZvF,EAAkB,SAAW,QAEpCwF,GAAOF,GAAUrJ,EAASmJ,IAE1BK,GAAOH,GAAUrJ,EAASoJ,IAE1BK,IAAuD,IAAxC,CAAC,EAAKzX,GAAMqH,QAAQ4B,GAEnCyO,GAAyH,OAAjGR,GAAgD,MAAvBjB,OAA8B,EAASA,EAAoBlE,IAAoBmF,GAAyB,EAEzJS,GAAaF,GAAeF,GAAOF,GAAUnE,EAAcoE,IAAQ1M,EAAW0M,IAAQI,GAAuB1B,EAA4BjE,QAEzI6F,GAAaH,GAAeJ,GAAUnE,EAAcoE,IAAQ1M,EAAW0M,IAAQI,GAAuB1B,EAA4BjE,QAAUyF,GAE5IK,GAAmBlC,GAAU8B,G1BzH9B,SAAwBlT,EAAK1E,EAAOyE,GACzC,IAAIwT,EAAItP,EAAOjE,EAAK1E,EAAOyE,GAC3B,OAAOwT,EAAIxT,EAAMA,EAAMwT,CACzB,C0BsHoDC,CAAeJ,GAAYN,GAASO,IAAcpP,EAAOmN,EAASgC,GAAaJ,GAAMF,GAAS1B,EAASiC,GAAaJ,IAEpKzO,EAAcgJ,GAAW8F,GACzBtL,EAAKwF,GAAW8F,GAAmBR,EACrC,CAEAxU,EAAMmG,cAAcxG,GAAQ+J,CAvE5B,CAwEF,EAQEhC,iBAAkB,CAAC,WE1HN,SAASyN,GAAiBC,EAAyBrQ,EAAcsD,QAC9D,IAAZA,IACFA,GAAU,GAGZ,ICnBoCrJ,ECJOJ,EFuBvCyW,EAA0B9V,EAAcwF,GACxCuQ,EAAuB/V,EAAcwF,IAf3C,SAAyBnG,GACvB,IAAImN,EAAOnN,EAAQ+D,wBACfI,EAASpB,EAAMoK,EAAK7I,OAAStE,EAAQqE,aAAe,EACpDD,EAASrB,EAAMoK,EAAK3I,QAAUxE,EAAQuE,cAAgB,EAC1D,OAAkB,IAAXJ,GAA2B,IAAXC,CACzB,CAU4DuS,CAAgBxQ,GACtEJ,EAAkBF,EAAmBM,GACrCgH,EAAOpJ,EAAsByS,EAAyBE,EAAsBjN,GAC5EyB,EAAS,CACXc,WAAY,EACZE,UAAW,GAET7C,EAAU,CACZ1E,EAAG,EACHE,EAAG,GAkBL,OAfI4R,IAA4BA,IAA4BhN,MACxB,SAA9B1J,EAAYoG,IAChBkG,GAAetG,MACbmF,GCnCgC9K,EDmCT+F,KClCdhG,EAAUC,IAAUO,EAAcP,GCJxC,CACL4L,YAFyChM,EDQbI,GCNR4L,WACpBE,UAAWlM,EAAQkM,WDGZH,GAAgB3L,IDoCnBO,EAAcwF,KAChBkD,EAAUtF,EAAsBoC,GAAc,IACtCxB,GAAKwB,EAAauH,WAC1BrE,EAAQxE,GAAKsB,EAAasH,WACjB1H,IACTsD,EAAQ1E,EAAIyH,GAAoBrG,KAI7B,CACLpB,EAAGwI,EAAK5O,KAAO2M,EAAOc,WAAa3C,EAAQ1E,EAC3CE,EAAGsI,EAAK/K,IAAM8I,EAAOgB,UAAY7C,EAAQxE,EACzCP,MAAO6I,EAAK7I,MACZE,OAAQ2I,EAAK3I,OAEjB,CGvDA,SAASoS,GAAMC,GACb,IAAItT,EAAM,IAAIoO,IACVmF,EAAU,IAAIC,IACdC,EAAS,GAKb,SAAS3F,EAAK4F,GACZH,EAAQI,IAAID,EAASlW,MACN,GAAG3B,OAAO6X,EAASxU,UAAY,GAAIwU,EAASnO,kBAAoB,IACtEvH,SAAQ,SAAU4V,GACzB,IAAKL,EAAQM,IAAID,GAAM,CACrB,IAAIE,EAAc9T,EAAI3F,IAAIuZ,GAEtBE,GACFhG,EAAKgG,EAET,CACF,IACAL,EAAO3E,KAAK4E,EACd,CAQA,OAzBAJ,EAAUtV,SAAQ,SAAU0V,GAC1B1T,EAAIiP,IAAIyE,EAASlW,KAAMkW,EACzB,IAiBAJ,EAAUtV,SAAQ,SAAU0V,GACrBH,EAAQM,IAAIH,EAASlW,OAExBsQ,EAAK4F,EAET,IACOD,CACT,CCvBA,IAAIM,GAAkB,CACpBnY,UAAW,SACX0X,UAAW,GACX1U,SAAU,YAGZ,SAASoV,KACP,IAAK,IAAI1B,EAAO2B,UAAUrG,OAAQsG,EAAO,IAAIpU,MAAMwS,GAAO6B,EAAO,EAAGA,EAAO7B,EAAM6B,IAC/ED,EAAKC,GAAQF,UAAUE,GAGzB,OAAQD,EAAKvE,MAAK,SAAUlT,GAC1B,QAASA,GAAoD,mBAAlCA,EAAQ+D,sBACrC,GACF,CAEO,SAAS4T,GAAgBC,QACL,IAArBA,IACFA,EAAmB,CAAC,GAGtB,IAAIC,EAAoBD,EACpBE,EAAwBD,EAAkBE,iBAC1CA,OAA6C,IAA1BD,EAAmC,GAAKA,EAC3DE,EAAyBH,EAAkBI,eAC3CA,OAA4C,IAA3BD,EAAoCV,GAAkBU,EAC3E,OAAO,SAAsBjZ,EAAWD,EAAQoD,QAC9B,IAAZA,IACFA,EAAU+V,GAGZ,ICxC6B/W,EAC3BgX,EDuCE9W,EAAQ,CACVjC,UAAW,SACXgZ,iBAAkB,GAClBjW,QAASzE,OAAOkE,OAAO,CAAC,EAAG2V,GA
[... machine-generated base64 payload elided: this span appears to be the VLQ "mappings" data of a source map for a minified, bundled JavaScript asset included in the diff; it contains no human-reviewable content ...]
C,IAAK,MAAMlnC,KAAWqmB,GAAezT,KAAKo1B,IACxCC,GAAIliB,oBAAoB/lB,EAC1B,IAMF4c,GAAmBqrB,IAcnB,MAEMxiB,GAAY,YACZ8jB,GAAkB,YAAY9jB,KAC9B+jB,GAAiB,WAAW/jB,KAC5BgkB,GAAgB,UAAUhkB,KAC1BikB,GAAiB,WAAWjkB,KAC5BkkB,GAAa,OAAOlkB,KACpBmkB,GAAe,SAASnkB,KACxBokB,GAAa,OAAOpkB,KACpBqkB,GAAc,QAAQrkB,KAEtBskB,GAAkB,OAClBC,GAAkB,OAClBC,GAAqB,UACrB7lB,GAAc,CAClBmc,UAAW,UACX2J,SAAU,UACVxJ,MAAO,UAEHvc,GAAU,CACdoc,WAAW,EACX2J,UAAU,EACVxJ,MAAO,KAOT,MAAMyJ,WAAchlB,GAClB,WAAAP,CAAY5kB,EAASukB,GACnBa,MAAMplB,EAASukB,GACf9D,KAAKsgB,SAAW,KAChBtgB,KAAK2pB,sBAAuB,EAC5B3pB,KAAK4pB,yBAA0B,EAC/B5pB,KAAK4gB,eACP,CAGA,kBAAWld,GACT,OAAOA,EACT,CACA,sBAAWC,GACT,OAAOA,EACT,CACA,eAAWpH,GACT,MA/CS,OAgDX,CAGA,IAAAmT,GACoBnP,GAAaqB,QAAQ5B,KAAK4E,SAAUwkB,IACxCpnB,mBAGdhC,KAAK6pB,gBACD7pB,KAAK6E,QAAQib,WACf9f,KAAK4E,SAASvJ,UAAU5E,IA/CN,QAsDpBuJ,KAAK4E,SAASvJ,UAAU1B,OAAO2vB,IAC/BztB,GAAOmE,KAAK4E,UACZ5E,KAAK4E,SAASvJ,UAAU5E,IAAI8yB,GAAiBC,IAC7CxpB,KAAKmF,gBARY,KACfnF,KAAK4E,SAASvJ,UAAU1B,OAAO6vB,IAC/BjpB,GAAaqB,QAAQ5B,KAAK4E,SAAUykB,IACpCrpB,KAAK8pB,oBAAoB,GAKG9pB,KAAK4E,SAAU5E,KAAK6E,QAAQib,WAC5D,CACA,IAAArQ,GACOzP,KAAK+pB,YAGQxpB,GAAaqB,QAAQ5B,KAAK4E,SAAUskB,IACxClnB,mBAQdhC,KAAK4E,SAASvJ,UAAU5E,IAAI+yB,IAC5BxpB,KAAKmF,gBANY,KACfnF,KAAK4E,SAASvJ,UAAU5E,IAAI6yB,IAC5BtpB,KAAK4E,SAASvJ,UAAU1B,OAAO6vB,GAAoBD,IACnDhpB,GAAaqB,QAAQ5B,KAAK4E,SAAUukB,GAAa,GAGrBnpB,KAAK4E,SAAU5E,KAAK6E,QAAQib,YAC5D,CACA,OAAA/a,GACE/E,KAAK6pB,gBACD7pB,KAAK+pB,WACP/pB,KAAK4E,SAASvJ,UAAU1B,OAAO4vB,IAEjC5kB,MAAMI,SACR,CACA,OAAAglB,GACE,OAAO/pB,KAAK4E,SAASvJ,UAAU7W,SAAS+kC,GAC1C,CAIA,kBAAAO,GACO9pB,KAAK6E,QAAQ4kB,WAGdzpB,KAAK2pB,sBAAwB3pB,KAAK4pB,0BAGtC5pB,KAAKsgB,SAAWziB,YAAW,KACzBmC,KAAKyP,MAAM,GACVzP,KAAK6E,QAAQob,QAClB,CACA,cAAA+J,CAAe5qB,EAAO6qB,GACpB,OAAQ7qB,EAAMqB,MACZ,IAAK,YACL,IAAK,WAEDT,KAAK2pB,qBAAuBM,EAC5B,MAEJ,IAAK,UACL,IAAK,WAEDjqB,KAAK4pB,wBAA0BK,EAIrC,GAAIA,EAEF,YADAjqB,KAAK6pB,gBAGP,MAAMvc,EAAclO,EAAMU,cACtBE,KAAK4E,WAAa0I,GAAetN,KAAK4E,SAASpgB,SAAS8oB,IAG5DtN,KAAK8pB,oBACP,CACA,aAAAlJ,GACErgB,GAAac,GAAGrB,KAAK4E,SAAUkkB,IAAiB1pB,GAASY,KAAKgqB,eAAe5qB,GAAO,KACpFmB,GAAac,GAAGrB,KAAK4E,SAAUmkB,IAAgB3pB,GAASY,KAAKgqB,eAAe5qB,GAAO,KACnFmB,GAAac,GAAGrB,KAAK4E,SAAUokB,IAAe5pB,GAASY,KAAKgqB,eAAe5qB,GAAO,KAClFmB,GAAac,GAAGrB,KAAK4E,SAAUqkB,IAAgB7pB,GAASY,KAAKgqB,eAAe5qB,GAAO,IACrF,CACA,aAAAyqB,GACE9c,aAAa/M,KAAKsgB,UAClBtgB,KAAKsgB,SAAW,IAClB,CAGA,sBAAO7jB,CAAgBqH,GACrB,OAAO9D,KAAKuH,MAAK,WACf,MAAMld,EAAOq/B,GAAMpkB,oBAAoBtF,KAAM8D,GAC7C,GAAsB,iBAAXA,EAAqB,CAC9B,QAA4B,IAAjBzZ,EAAKyZ,GACd,MAAM,IAAIU,UAAU,oBAAoBV,MAE1CzZ,EAAKyZ,GAAQ9D,KACf,CACF,GACF,ECr0IK,SAASkqB,GAAc7tB,GACD,WAAvBhX,SAASuX,WAAyBP,IACjChX,SAASyF,iBAAiB,mBAAoBuR,EACrD,CDy0IAuK,GAAqB8iB,IAMrBvtB,GAAmButB,IEtyInBQ,IAvCA,WAC2B,GAAG93B,MAAM5U,KAChC6H,SAAS+a,iBAAiB,+BAETtd,KAAI,SAAUqnC,GAC/B,OAAO,IAAI/J,GAAQ+J,EAAkB,CAAElK,MAAO,CAAEvQ,KAAM,IAAKD,KAAM,MACnE,GACF,IAiCAya,IA5BA,WACY7kC,SAAS68B,eAAe,mBAC9Bp3B,iBAAiB,SAAS,WAC5BzF,SAAS6G,KAAKT,UAAY,EAC1BpG,SAASC,gBAAgBmG,UAAY,CACvC,GACF,IAuBAy+B,IArBA,WACE,IAAIE,EAAM/kC,SAAS68B,eAAe,mBAC9BmI,EAAShlC,SACVilC,uBAAuB,aAAa,GACpChnC,wBACH1D,OAAOkL,iBAAiB,UAAU,WAC5BkV,KAAKuqB,UAAYvqB,KAAKwqB,SAAWxqB,KAAKwqB,QAAUH,EAAOzsC,OACzDwsC,EAAIrpC,MAAM6wB,QAAU,QAEpBwY,EAAIrpC,MAAM6wB,QAAU,OAEtB5R,KAAKuqB,UAAYvqB,KAAKwqB,OACxB,GACF","sources":["webpack://pydata_sphinx_theme/webpack/bootstrap","webpack://pydata_sphinx_theme/webpack/runtime/define property getters","webpack://pydata_sphinx_theme/webpack/runtime/hasOwnProperty shorthand","webpack://pydata_sphinx_theme/webpack/runtime/make namespace 
object","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/enums.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getNodeName.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getWindow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/instanceOf.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/applyStyles.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getBasePlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/math.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/userAgent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/isLayoutViewport.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getBoundingClientRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getLayoutRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/contains.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getComputedStyle.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/isTableElement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getDocumentElement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getParentNode.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getOffsetParent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getMainAxisFromPlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/within.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/mergePaddingObject.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getFreshSideObject.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/expandToHashMap.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/arrow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getVariation.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/computeStyles.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/eventListeners.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getOppositePlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getOppositeVariationPlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getWindowScroll.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getWindowScrollBarX.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/isScrollParent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getScrollParent.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/listScrollParents.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/rectToClientRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getClippingRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getViewportRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getDocumentRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/computeOffsets.js","webpack://pydata_sphinx_theme/
./node_modules/@popperjs/core/lib/utils/detectOverflow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/flip.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/computeAutoPlacement.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/hide.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/offset.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/popperOffsets.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/modifiers/preventOverflow.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/getAltAxis.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getCompositeRect.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getNodeScroll.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/dom-utils/getHTMLElementScroll.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/orderModifiers.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/createPopper.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/debounce.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/utils/mergeByName.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/popper.js","webpack://pydata_sphinx_theme/./node_modules/@popperjs/core/lib/popper-lite.js","webpack://pydata_sphinx_theme/./node_modules/bootstrap/dist/js/bootstrap.esm.js","webpack://pydata_sphinx_theme/./src/pydata_sphinx_theme/assets/scripts/mixin.js","webpack://pydata_sphinx_theme/./src/pydata_sphinx_theme/assets/scripts/bootstrap.js"],"sourcesContent":["// The require scope\nvar __webpack_require__ = {};\n\n","// define getter functions for harmony exports\n__webpack_require__.d = (exports, definition) => {\n\tfor(var key in definition) {\n\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n\t\t}\n\t}\n};","__webpack_require__.o = (obj, prop) => (Object.prototype.hasOwnProperty.call(obj, prop))","// define __esModule on exports\n__webpack_require__.r = (exports) => {\n\tif(typeof Symbol !== 'undefined' && Symbol.toStringTag) {\n\t\tObject.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });\n\t}\n\tObject.defineProperty(exports, '__esModule', { value: true });\n};","export var top = 'top';\nexport var bottom = 'bottom';\nexport var right = 'right';\nexport var left = 'left';\nexport var auto = 'auto';\nexport var basePlacements = [top, bottom, right, left];\nexport var start = 'start';\nexport var end = 'end';\nexport var clippingParents = 'clippingParents';\nexport var viewport = 'viewport';\nexport var popper = 'popper';\nexport var reference = 'reference';\nexport var variationPlacements = /*#__PURE__*/basePlacements.reduce(function (acc, placement) {\n return acc.concat([placement + \"-\" + start, placement + \"-\" + end]);\n}, []);\nexport var placements = /*#__PURE__*/[].concat(basePlacements, [auto]).reduce(function (acc, placement) {\n return acc.concat([placement, placement + \"-\" + start, placement + \"-\" + end]);\n}, []); // modifiers that need to read the DOM\n\nexport var beforeRead = 'beforeRead';\nexport var read = 'read';\nexport var afterRead = 'afterRead'; // pure-logic modifiers\n\nexport var beforeMain = 'beforeMain';\nexport var main = 'main';\nexport var afterMain = 
'afterMain'; // modifier with the purpose to write to the DOM (or write into a framework state)\n\nexport var beforeWrite = 'beforeWrite';\nexport var write = 'write';\nexport var afterWrite = 'afterWrite';\nexport var modifierPhases = [beforeRead, read, afterRead, beforeMain, main, afterMain, beforeWrite, write, afterWrite];","export default function getNodeName(element) {\n return element ? (element.nodeName || '').toLowerCase() : null;\n}","export default function getWindow(node) {\n if (node == null) {\n return window;\n }\n\n if (node.toString() !== '[object Window]') {\n var ownerDocument = node.ownerDocument;\n return ownerDocument ? ownerDocument.defaultView || window : window;\n }\n\n return node;\n}","import getWindow from \"./getWindow.js\";\n\nfunction isElement(node) {\n var OwnElement = getWindow(node).Element;\n return node instanceof OwnElement || node instanceof Element;\n}\n\nfunction isHTMLElement(node) {\n var OwnElement = getWindow(node).HTMLElement;\n return node instanceof OwnElement || node instanceof HTMLElement;\n}\n\nfunction isShadowRoot(node) {\n // IE 11 has no ShadowRoot\n if (typeof ShadowRoot === 'undefined') {\n return false;\n }\n\n var OwnElement = getWindow(node).ShadowRoot;\n return node instanceof OwnElement || node instanceof ShadowRoot;\n}\n\nexport { isElement, isHTMLElement, isShadowRoot };","import getNodeName from \"../dom-utils/getNodeName.js\";\nimport { isHTMLElement } from \"../dom-utils/instanceOf.js\"; // This modifier takes the styles prepared by the `computeStyles` modifier\n// and applies them to the HTMLElements such as popper and arrow\n\nfunction applyStyles(_ref) {\n var state = _ref.state;\n Object.keys(state.elements).forEach(function (name) {\n var style = state.styles[name] || {};\n var attributes = state.attributes[name] || {};\n var element = state.elements[name]; // arrow is optional + virtual elements\n\n if (!isHTMLElement(element) || !getNodeName(element)) {\n return;\n } // Flow doesn't support to extend this property, but it's the most\n // effective way to apply styles to an HTMLElement\n // $FlowFixMe[cannot-write]\n\n\n Object.assign(element.style, style);\n Object.keys(attributes).forEach(function (name) {\n var value = attributes[name];\n\n if (value === false) {\n element.removeAttribute(name);\n } else {\n element.setAttribute(name, value === true ? '' : value);\n }\n });\n });\n}\n\nfunction effect(_ref2) {\n var state = _ref2.state;\n var initialStyles = {\n popper: {\n position: state.options.strategy,\n left: '0',\n top: '0',\n margin: '0'\n },\n arrow: {\n position: 'absolute'\n },\n reference: {}\n };\n Object.assign(state.elements.popper.style, initialStyles.popper);\n state.styles = initialStyles;\n\n if (state.elements.arrow) {\n Object.assign(state.elements.arrow.style, initialStyles.arrow);\n }\n\n return function () {\n Object.keys(state.elements).forEach(function (name) {\n var element = state.elements[name];\n var attributes = state.attributes[name] || {};\n var styleProperties = Object.keys(state.styles.hasOwnProperty(name) ? 
state.styles[name] : initialStyles[name]); // Set all values to an empty string to unset them\n\n var style = styleProperties.reduce(function (style, property) {\n style[property] = '';\n return style;\n }, {}); // arrow is optional + virtual elements\n\n if (!isHTMLElement(element) || !getNodeName(element)) {\n return;\n }\n\n Object.assign(element.style, style);\n Object.keys(attributes).forEach(function (attribute) {\n element.removeAttribute(attribute);\n });\n });\n };\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'applyStyles',\n enabled: true,\n phase: 'write',\n fn: applyStyles,\n effect: effect,\n requires: ['computeStyles']\n};","import { auto } from \"../enums.js\";\nexport default function getBasePlacement(placement) {\n return placement.split('-')[0];\n}","export var max = Math.max;\nexport var min = Math.min;\nexport var round = Math.round;","export default function getUAString() {\n var uaData = navigator.userAgentData;\n\n if (uaData != null && uaData.brands && Array.isArray(uaData.brands)) {\n return uaData.brands.map(function (item) {\n return item.brand + \"/\" + item.version;\n }).join(' ');\n }\n\n return navigator.userAgent;\n}","import getUAString from \"../utils/userAgent.js\";\nexport default function isLayoutViewport() {\n return !/^((?!chrome|android).)*safari/i.test(getUAString());\n}","import { isElement, isHTMLElement } from \"./instanceOf.js\";\nimport { round } from \"../utils/math.js\";\nimport getWindow from \"./getWindow.js\";\nimport isLayoutViewport from \"./isLayoutViewport.js\";\nexport default function getBoundingClientRect(element, includeScale, isFixedStrategy) {\n if (includeScale === void 0) {\n includeScale = false;\n }\n\n if (isFixedStrategy === void 0) {\n isFixedStrategy = false;\n }\n\n var clientRect = element.getBoundingClientRect();\n var scaleX = 1;\n var scaleY = 1;\n\n if (includeScale && isHTMLElement(element)) {\n scaleX = element.offsetWidth > 0 ? round(clientRect.width) / element.offsetWidth || 1 : 1;\n scaleY = element.offsetHeight > 0 ? round(clientRect.height) / element.offsetHeight || 1 : 1;\n }\n\n var _ref = isElement(element) ? getWindow(element) : window,\n visualViewport = _ref.visualViewport;\n\n var addVisualOffsets = !isLayoutViewport() && isFixedStrategy;\n var x = (clientRect.left + (addVisualOffsets && visualViewport ? visualViewport.offsetLeft : 0)) / scaleX;\n var y = (clientRect.top + (addVisualOffsets && visualViewport ? visualViewport.offsetTop : 0)) / scaleY;\n var width = clientRect.width / scaleX;\n var height = clientRect.height / scaleY;\n return {\n width: width,\n height: height,\n top: y,\n right: x + width,\n bottom: y + height,\n left: x,\n x: x,\n y: y\n };\n}","import getBoundingClientRect from \"./getBoundingClientRect.js\"; // Returns the layout rect of an element relative to its offsetParent. 
Layout\n// means it doesn't take into account transforms.\n\nexport default function getLayoutRect(element) {\n var clientRect = getBoundingClientRect(element); // Use the clientRect sizes if it's not been transformed.\n // Fixes https://github.com/popperjs/popper-core/issues/1223\n\n var width = element.offsetWidth;\n var height = element.offsetHeight;\n\n if (Math.abs(clientRect.width - width) <= 1) {\n width = clientRect.width;\n }\n\n if (Math.abs(clientRect.height - height) <= 1) {\n height = clientRect.height;\n }\n\n return {\n x: element.offsetLeft,\n y: element.offsetTop,\n width: width,\n height: height\n };\n}","import { isShadowRoot } from \"./instanceOf.js\";\nexport default function contains(parent, child) {\n var rootNode = child.getRootNode && child.getRootNode(); // First, attempt with faster native method\n\n if (parent.contains(child)) {\n return true;\n } // then fallback to custom implementation with Shadow DOM support\n else if (rootNode && isShadowRoot(rootNode)) {\n var next = child;\n\n do {\n if (next && parent.isSameNode(next)) {\n return true;\n } // $FlowFixMe[prop-missing]: need a better way to handle this...\n\n\n next = next.parentNode || next.host;\n } while (next);\n } // Give up, the result is false\n\n\n return false;\n}","import getWindow from \"./getWindow.js\";\nexport default function getComputedStyle(element) {\n return getWindow(element).getComputedStyle(element);\n}","import getNodeName from \"./getNodeName.js\";\nexport default function isTableElement(element) {\n return ['table', 'td', 'th'].indexOf(getNodeName(element)) >= 0;\n}","import { isElement } from \"./instanceOf.js\";\nexport default function getDocumentElement(element) {\n // $FlowFixMe[incompatible-return]: assume body is always available\n return ((isElement(element) ? element.ownerDocument : // $FlowFixMe[prop-missing]\n element.document) || window.document).documentElement;\n}","import getNodeName from \"./getNodeName.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport { isShadowRoot } from \"./instanceOf.js\";\nexport default function getParentNode(element) {\n if (getNodeName(element) === 'html') {\n return element;\n }\n\n return (// this is a quicker (but less type safe) way to save quite some bytes from the bundle\n // $FlowFixMe[incompatible-return]\n // $FlowFixMe[prop-missing]\n element.assignedSlot || // step into the shadow DOM of the parent of a slotted node\n element.parentNode || ( // DOM Element detected\n isShadowRoot(element) ? 
element.host : null) || // ShadowRoot detected\n // $FlowFixMe[incompatible-call]: HTMLElement is a Node\n getDocumentElement(element) // fallback\n\n );\n}","import getWindow from \"./getWindow.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport getComputedStyle from \"./getComputedStyle.js\";\nimport { isHTMLElement, isShadowRoot } from \"./instanceOf.js\";\nimport isTableElement from \"./isTableElement.js\";\nimport getParentNode from \"./getParentNode.js\";\nimport getUAString from \"../utils/userAgent.js\";\n\nfunction getTrueOffsetParent(element) {\n if (!isHTMLElement(element) || // https://github.com/popperjs/popper-core/issues/837\n getComputedStyle(element).position === 'fixed') {\n return null;\n }\n\n return element.offsetParent;\n} // `.offsetParent` reports `null` for fixed elements, while absolute elements\n// return the containing block\n\n\nfunction getContainingBlock(element) {\n var isFirefox = /firefox/i.test(getUAString());\n var isIE = /Trident/i.test(getUAString());\n\n if (isIE && isHTMLElement(element)) {\n // In IE 9, 10 and 11 fixed elements containing block is always established by the viewport\n var elementCss = getComputedStyle(element);\n\n if (elementCss.position === 'fixed') {\n return null;\n }\n }\n\n var currentNode = getParentNode(element);\n\n if (isShadowRoot(currentNode)) {\n currentNode = currentNode.host;\n }\n\n while (isHTMLElement(currentNode) && ['html', 'body'].indexOf(getNodeName(currentNode)) < 0) {\n var css = getComputedStyle(currentNode); // This is non-exhaustive but covers the most common CSS properties that\n // create a containing block.\n // https://developer.mozilla.org/en-US/docs/Web/CSS/Containing_block#identifying_the_containing_block\n\n if (css.transform !== 'none' || css.perspective !== 'none' || css.contain === 'paint' || ['transform', 'perspective'].indexOf(css.willChange) !== -1 || isFirefox && css.willChange === 'filter' || isFirefox && css.filter && css.filter !== 'none') {\n return currentNode;\n } else {\n currentNode = currentNode.parentNode;\n }\n }\n\n return null;\n} // Gets the closest ancestor positioned element. Handles some edge cases,\n// such as table ancestors and cross browser bugs.\n\n\nexport default function getOffsetParent(element) {\n var window = getWindow(element);\n var offsetParent = getTrueOffsetParent(element);\n\n while (offsetParent && isTableElement(offsetParent) && getComputedStyle(offsetParent).position === 'static') {\n offsetParent = getTrueOffsetParent(offsetParent);\n }\n\n if (offsetParent && (getNodeName(offsetParent) === 'html' || getNodeName(offsetParent) === 'body' && getComputedStyle(offsetParent).position === 'static')) {\n return window;\n }\n\n return offsetParent || getContainingBlock(element) || window;\n}","export default function getMainAxisFromPlacement(placement) {\n return ['top', 'bottom'].indexOf(placement) >= 0 ? 'x' : 'y';\n}","import { max as mathMax, min as mathMin } from \"./math.js\";\nexport function within(min, value, max) {\n return mathMax(min, mathMin(value, max));\n}\nexport function withinMaxClamp(min, value, max) {\n var v = within(min, value, max);\n return v > max ? 
max : v;\n}","import getFreshSideObject from \"./getFreshSideObject.js\";\nexport default function mergePaddingObject(paddingObject) {\n return Object.assign({}, getFreshSideObject(), paddingObject);\n}","export default function getFreshSideObject() {\n return {\n top: 0,\n right: 0,\n bottom: 0,\n left: 0\n };\n}","export default function expandToHashMap(value, keys) {\n return keys.reduce(function (hashMap, key) {\n hashMap[key] = value;\n return hashMap;\n }, {});\n}","import getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getLayoutRect from \"../dom-utils/getLayoutRect.js\";\nimport contains from \"../dom-utils/contains.js\";\nimport getOffsetParent from \"../dom-utils/getOffsetParent.js\";\nimport getMainAxisFromPlacement from \"../utils/getMainAxisFromPlacement.js\";\nimport { within } from \"../utils/within.js\";\nimport mergePaddingObject from \"../utils/mergePaddingObject.js\";\nimport expandToHashMap from \"../utils/expandToHashMap.js\";\nimport { left, right, basePlacements, top, bottom } from \"../enums.js\"; // eslint-disable-next-line import/no-unused-modules\n\nvar toPaddingObject = function toPaddingObject(padding, state) {\n padding = typeof padding === 'function' ? padding(Object.assign({}, state.rects, {\n placement: state.placement\n })) : padding;\n return mergePaddingObject(typeof padding !== 'number' ? padding : expandToHashMap(padding, basePlacements));\n};\n\nfunction arrow(_ref) {\n var _state$modifiersData$;\n\n var state = _ref.state,\n name = _ref.name,\n options = _ref.options;\n var arrowElement = state.elements.arrow;\n var popperOffsets = state.modifiersData.popperOffsets;\n var basePlacement = getBasePlacement(state.placement);\n var axis = getMainAxisFromPlacement(basePlacement);\n var isVertical = [left, right].indexOf(basePlacement) >= 0;\n var len = isVertical ? 'height' : 'width';\n\n if (!arrowElement || !popperOffsets) {\n return;\n }\n\n var paddingObject = toPaddingObject(options.padding, state);\n var arrowRect = getLayoutRect(arrowElement);\n var minProp = axis === 'y' ? top : left;\n var maxProp = axis === 'y' ? bottom : right;\n var endDiff = state.rects.reference[len] + state.rects.reference[axis] - popperOffsets[axis] - state.rects.popper[len];\n var startDiff = popperOffsets[axis] - state.rects.reference[axis];\n var arrowOffsetParent = getOffsetParent(arrowElement);\n var clientSize = arrowOffsetParent ? axis === 'y' ? arrowOffsetParent.clientHeight || 0 : arrowOffsetParent.clientWidth || 0 : 0;\n var centerToReference = endDiff / 2 - startDiff / 2; // Make sure the arrow doesn't overflow the popper if the center point is\n // outside of the popper bounds\n\n var min = paddingObject[minProp];\n var max = clientSize - arrowRect[len] - paddingObject[maxProp];\n var center = clientSize / 2 - arrowRect[len] / 2 + centerToReference;\n var offset = within(min, center, max); // Prevents breaking syntax highlighting...\n\n var axisProp = axis;\n state.modifiersData[name] = (_state$modifiersData$ = {}, _state$modifiersData$[axisProp] = offset, _state$modifiersData$.centerOffset = offset - center, _state$modifiersData$);\n}\n\nfunction effect(_ref2) {\n var state = _ref2.state,\n options = _ref2.options;\n var _options$element = options.element,\n arrowElement = _options$element === void 0 ? 
'[data-popper-arrow]' : _options$element;\n\n if (arrowElement == null) {\n return;\n } // CSS selector\n\n\n if (typeof arrowElement === 'string') {\n arrowElement = state.elements.popper.querySelector(arrowElement);\n\n if (!arrowElement) {\n return;\n }\n }\n\n if (!contains(state.elements.popper, arrowElement)) {\n return;\n }\n\n state.elements.arrow = arrowElement;\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'arrow',\n enabled: true,\n phase: 'main',\n fn: arrow,\n effect: effect,\n requires: ['popperOffsets'],\n requiresIfExists: ['preventOverflow']\n};","export default function getVariation(placement) {\n return placement.split('-')[1];\n}","import { top, left, right, bottom, end } from \"../enums.js\";\nimport getOffsetParent from \"../dom-utils/getOffsetParent.js\";\nimport getWindow from \"../dom-utils/getWindow.js\";\nimport getDocumentElement from \"../dom-utils/getDocumentElement.js\";\nimport getComputedStyle from \"../dom-utils/getComputedStyle.js\";\nimport getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getVariation from \"../utils/getVariation.js\";\nimport { round } from \"../utils/math.js\"; // eslint-disable-next-line import/no-unused-modules\n\nvar unsetSides = {\n top: 'auto',\n right: 'auto',\n bottom: 'auto',\n left: 'auto'\n}; // Round the offsets to the nearest suitable subpixel based on the DPR.\n// Zooming can change the DPR, but it seems to report a value that will\n// cleanly divide the values into the appropriate subpixels.\n\nfunction roundOffsetsByDPR(_ref, win) {\n var x = _ref.x,\n y = _ref.y;\n var dpr = win.devicePixelRatio || 1;\n return {\n x: round(x * dpr) / dpr || 0,\n y: round(y * dpr) / dpr || 0\n };\n}\n\nexport function mapToStyles(_ref2) {\n var _Object$assign2;\n\n var popper = _ref2.popper,\n popperRect = _ref2.popperRect,\n placement = _ref2.placement,\n variation = _ref2.variation,\n offsets = _ref2.offsets,\n position = _ref2.position,\n gpuAcceleration = _ref2.gpuAcceleration,\n adaptive = _ref2.adaptive,\n roundOffsets = _ref2.roundOffsets,\n isFixed = _ref2.isFixed;\n var _offsets$x = offsets.x,\n x = _offsets$x === void 0 ? 0 : _offsets$x,\n _offsets$y = offsets.y,\n y = _offsets$y === void 0 ? 0 : _offsets$y;\n\n var _ref3 = typeof roundOffsets === 'function' ? roundOffsets({\n x: x,\n y: y\n }) : {\n x: x,\n y: y\n };\n\n x = _ref3.x;\n y = _ref3.y;\n var hasX = offsets.hasOwnProperty('x');\n var hasY = offsets.hasOwnProperty('y');\n var sideX = left;\n var sideY = top;\n var win = window;\n\n if (adaptive) {\n var offsetParent = getOffsetParent(popper);\n var heightProp = 'clientHeight';\n var widthProp = 'clientWidth';\n\n if (offsetParent === getWindow(popper)) {\n offsetParent = getDocumentElement(popper);\n\n if (getComputedStyle(offsetParent).position !== 'static' && position === 'absolute') {\n heightProp = 'scrollHeight';\n widthProp = 'scrollWidth';\n }\n } // $FlowFixMe[incompatible-cast]: force type refinement, we compare offsetParent with window above, but Flow doesn't detect it\n\n\n offsetParent = offsetParent;\n\n if (placement === top || (placement === left || placement === right) && variation === end) {\n sideY = bottom;\n var offsetY = isFixed && offsetParent === win && win.visualViewport ? win.visualViewport.height : // $FlowFixMe[prop-missing]\n offsetParent[heightProp];\n y -= offsetY - popperRect.height;\n y *= gpuAcceleration ? 
1 : -1;\n }\n\n if (placement === left || (placement === top || placement === bottom) && variation === end) {\n sideX = right;\n var offsetX = isFixed && offsetParent === win && win.visualViewport ? win.visualViewport.width : // $FlowFixMe[prop-missing]\n offsetParent[widthProp];\n x -= offsetX - popperRect.width;\n x *= gpuAcceleration ? 1 : -1;\n }\n }\n\n var commonStyles = Object.assign({\n position: position\n }, adaptive && unsetSides);\n\n var _ref4 = roundOffsets === true ? roundOffsetsByDPR({\n x: x,\n y: y\n }, getWindow(popper)) : {\n x: x,\n y: y\n };\n\n x = _ref4.x;\n y = _ref4.y;\n\n if (gpuAcceleration) {\n var _Object$assign;\n\n return Object.assign({}, commonStyles, (_Object$assign = {}, _Object$assign[sideY] = hasY ? '0' : '', _Object$assign[sideX] = hasX ? '0' : '', _Object$assign.transform = (win.devicePixelRatio || 1) <= 1 ? \"translate(\" + x + \"px, \" + y + \"px)\" : \"translate3d(\" + x + \"px, \" + y + \"px, 0)\", _Object$assign));\n }\n\n return Object.assign({}, commonStyles, (_Object$assign2 = {}, _Object$assign2[sideY] = hasY ? y + \"px\" : '', _Object$assign2[sideX] = hasX ? x + \"px\" : '', _Object$assign2.transform = '', _Object$assign2));\n}\n\nfunction computeStyles(_ref5) {\n var state = _ref5.state,\n options = _ref5.options;\n var _options$gpuAccelerat = options.gpuAcceleration,\n gpuAcceleration = _options$gpuAccelerat === void 0 ? true : _options$gpuAccelerat,\n _options$adaptive = options.adaptive,\n adaptive = _options$adaptive === void 0 ? true : _options$adaptive,\n _options$roundOffsets = options.roundOffsets,\n roundOffsets = _options$roundOffsets === void 0 ? true : _options$roundOffsets;\n var commonStyles = {\n placement: getBasePlacement(state.placement),\n variation: getVariation(state.placement),\n popper: state.elements.popper,\n popperRect: state.rects.popper,\n gpuAcceleration: gpuAcceleration,\n isFixed: state.options.strategy === 'fixed'\n };\n\n if (state.modifiersData.popperOffsets != null) {\n state.styles.popper = Object.assign({}, state.styles.popper, mapToStyles(Object.assign({}, commonStyles, {\n offsets: state.modifiersData.popperOffsets,\n position: state.options.strategy,\n adaptive: adaptive,\n roundOffsets: roundOffsets\n })));\n }\n\n if (state.modifiersData.arrow != null) {\n state.styles.arrow = Object.assign({}, state.styles.arrow, mapToStyles(Object.assign({}, commonStyles, {\n offsets: state.modifiersData.arrow,\n position: 'absolute',\n adaptive: false,\n roundOffsets: roundOffsets\n })));\n }\n\n state.attributes.popper = Object.assign({}, state.attributes.popper, {\n 'data-popper-placement': state.placement\n });\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'computeStyles',\n enabled: true,\n phase: 'beforeWrite',\n fn: computeStyles,\n data: {}\n};","import getWindow from \"../dom-utils/getWindow.js\"; // eslint-disable-next-line import/no-unused-modules\n\nvar passive = {\n passive: true\n};\n\nfunction effect(_ref) {\n var state = _ref.state,\n instance = _ref.instance,\n options = _ref.options;\n var _options$scroll = options.scroll,\n scroll = _options$scroll === void 0 ? true : _options$scroll,\n _options$resize = options.resize,\n resize = _options$resize === void 0 ? 
true : _options$resize;\n var window = getWindow(state.elements.popper);\n var scrollParents = [].concat(state.scrollParents.reference, state.scrollParents.popper);\n\n if (scroll) {\n scrollParents.forEach(function (scrollParent) {\n scrollParent.addEventListener('scroll', instance.update, passive);\n });\n }\n\n if (resize) {\n window.addEventListener('resize', instance.update, passive);\n }\n\n return function () {\n if (scroll) {\n scrollParents.forEach(function (scrollParent) {\n scrollParent.removeEventListener('scroll', instance.update, passive);\n });\n }\n\n if (resize) {\n window.removeEventListener('resize', instance.update, passive);\n }\n };\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'eventListeners',\n enabled: true,\n phase: 'write',\n fn: function fn() {},\n effect: effect,\n data: {}\n};","var hash = {\n left: 'right',\n right: 'left',\n bottom: 'top',\n top: 'bottom'\n};\nexport default function getOppositePlacement(placement) {\n return placement.replace(/left|right|bottom|top/g, function (matched) {\n return hash[matched];\n });\n}","var hash = {\n start: 'end',\n end: 'start'\n};\nexport default function getOppositeVariationPlacement(placement) {\n return placement.replace(/start|end/g, function (matched) {\n return hash[matched];\n });\n}","import getWindow from \"./getWindow.js\";\nexport default function getWindowScroll(node) {\n var win = getWindow(node);\n var scrollLeft = win.pageXOffset;\n var scrollTop = win.pageYOffset;\n return {\n scrollLeft: scrollLeft,\n scrollTop: scrollTop\n };\n}","import getBoundingClientRect from \"./getBoundingClientRect.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport getWindowScroll from \"./getWindowScroll.js\";\nexport default function getWindowScrollBarX(element) {\n // If has a CSS width greater than the viewport, then this will be\n // incorrect for RTL.\n // Popper 1 is broken in this case and never had a bug report so let's assume\n // it's not an issue. I don't think anyone ever specifies width on \n // anyway.\n // Browsers where the left scrollbar doesn't cause an issue report `0` for\n // this (e.g. 
Edge 2019, IE11, Safari)\n return getBoundingClientRect(getDocumentElement(element)).left + getWindowScroll(element).scrollLeft;\n}","import getComputedStyle from \"./getComputedStyle.js\";\nexport default function isScrollParent(element) {\n // Firefox wants us to check `-x` and `-y` variations as well\n var _getComputedStyle = getComputedStyle(element),\n overflow = _getComputedStyle.overflow,\n overflowX = _getComputedStyle.overflowX,\n overflowY = _getComputedStyle.overflowY;\n\n return /auto|scroll|overlay|hidden/.test(overflow + overflowY + overflowX);\n}","import getParentNode from \"./getParentNode.js\";\nimport isScrollParent from \"./isScrollParent.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport { isHTMLElement } from \"./instanceOf.js\";\nexport default function getScrollParent(node) {\n if (['html', 'body', '#document'].indexOf(getNodeName(node)) >= 0) {\n // $FlowFixMe[incompatible-return]: assume body is always available\n return node.ownerDocument.body;\n }\n\n if (isHTMLElement(node) && isScrollParent(node)) {\n return node;\n }\n\n return getScrollParent(getParentNode(node));\n}","import getScrollParent from \"./getScrollParent.js\";\nimport getParentNode from \"./getParentNode.js\";\nimport getWindow from \"./getWindow.js\";\nimport isScrollParent from \"./isScrollParent.js\";\n/*\ngiven a DOM element, return the list of all scroll parents, up the list of ancesors\nuntil we get to the top window object. This list is what we attach scroll listeners\nto, because if any of these parent elements scroll, we'll need to re-calculate the\nreference element's position.\n*/\n\nexport default function listScrollParents(element, list) {\n var _element$ownerDocumen;\n\n if (list === void 0) {\n list = [];\n }\n\n var scrollParent = getScrollParent(element);\n var isBody = scrollParent === ((_element$ownerDocumen = element.ownerDocument) == null ? void 0 : _element$ownerDocumen.body);\n var win = getWindow(scrollParent);\n var target = isBody ? [win].concat(win.visualViewport || [], isScrollParent(scrollParent) ? scrollParent : []) : scrollParent;\n var updatedList = list.concat(target);\n return isBody ? 
updatedList : // $FlowFixMe[incompatible-call]: isBody tells us target will be an HTMLElement here\n updatedList.concat(listScrollParents(getParentNode(target)));\n}","export default function rectToClientRect(rect) {\n return Object.assign({}, rect, {\n left: rect.x,\n top: rect.y,\n right: rect.x + rect.width,\n bottom: rect.y + rect.height\n });\n}","import { viewport } from \"../enums.js\";\nimport getViewportRect from \"./getViewportRect.js\";\nimport getDocumentRect from \"./getDocumentRect.js\";\nimport listScrollParents from \"./listScrollParents.js\";\nimport getOffsetParent from \"./getOffsetParent.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport getComputedStyle from \"./getComputedStyle.js\";\nimport { isElement, isHTMLElement } from \"./instanceOf.js\";\nimport getBoundingClientRect from \"./getBoundingClientRect.js\";\nimport getParentNode from \"./getParentNode.js\";\nimport contains from \"./contains.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport rectToClientRect from \"../utils/rectToClientRect.js\";\nimport { max, min } from \"../utils/math.js\";\n\nfunction getInnerBoundingClientRect(element, strategy) {\n var rect = getBoundingClientRect(element, false, strategy === 'fixed');\n rect.top = rect.top + element.clientTop;\n rect.left = rect.left + element.clientLeft;\n rect.bottom = rect.top + element.clientHeight;\n rect.right = rect.left + element.clientWidth;\n rect.width = element.clientWidth;\n rect.height = element.clientHeight;\n rect.x = rect.left;\n rect.y = rect.top;\n return rect;\n}\n\nfunction getClientRectFromMixedType(element, clippingParent, strategy) {\n return clippingParent === viewport ? rectToClientRect(getViewportRect(element, strategy)) : isElement(clippingParent) ? getInnerBoundingClientRect(clippingParent, strategy) : rectToClientRect(getDocumentRect(getDocumentElement(element)));\n} // A \"clipping parent\" is an overflowable container with the characteristic of\n// clipping (or hiding) overflowing elements with a position different from\n// `initial`\n\n\nfunction getClippingParents(element) {\n var clippingParents = listScrollParents(getParentNode(element));\n var canEscapeClipping = ['absolute', 'fixed'].indexOf(getComputedStyle(element).position) >= 0;\n var clipperElement = canEscapeClipping && isHTMLElement(element) ? getOffsetParent(element) : element;\n\n if (!isElement(clipperElement)) {\n return [];\n } // $FlowFixMe[incompatible-return]: https://github.com/facebook/flow/issues/1414\n\n\n return clippingParents.filter(function (clippingParent) {\n return isElement(clippingParent) && contains(clippingParent, clipperElement) && getNodeName(clippingParent) !== 'body';\n });\n} // Gets the maximum area that the element is visible in due to any number of\n// clipping parents\n\n\nexport default function getClippingRect(element, boundary, rootBoundary, strategy) {\n var mainClippingParents = boundary === 'clippingParents' ? 
getClippingParents(element) : [].concat(boundary);\n var clippingParents = [].concat(mainClippingParents, [rootBoundary]);\n var firstClippingParent = clippingParents[0];\n var clippingRect = clippingParents.reduce(function (accRect, clippingParent) {\n var rect = getClientRectFromMixedType(element, clippingParent, strategy);\n accRect.top = max(rect.top, accRect.top);\n accRect.right = min(rect.right, accRect.right);\n accRect.bottom = min(rect.bottom, accRect.bottom);\n accRect.left = max(rect.left, accRect.left);\n return accRect;\n }, getClientRectFromMixedType(element, firstClippingParent, strategy));\n clippingRect.width = clippingRect.right - clippingRect.left;\n clippingRect.height = clippingRect.bottom - clippingRect.top;\n clippingRect.x = clippingRect.left;\n clippingRect.y = clippingRect.top;\n return clippingRect;\n}","import getWindow from \"./getWindow.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport getWindowScrollBarX from \"./getWindowScrollBarX.js\";\nimport isLayoutViewport from \"./isLayoutViewport.js\";\nexport default function getViewportRect(element, strategy) {\n var win = getWindow(element);\n var html = getDocumentElement(element);\n var visualViewport = win.visualViewport;\n var width = html.clientWidth;\n var height = html.clientHeight;\n var x = 0;\n var y = 0;\n\n if (visualViewport) {\n width = visualViewport.width;\n height = visualViewport.height;\n var layoutViewport = isLayoutViewport();\n\n if (layoutViewport || !layoutViewport && strategy === 'fixed') {\n x = visualViewport.offsetLeft;\n y = visualViewport.offsetTop;\n }\n }\n\n return {\n width: width,\n height: height,\n x: x + getWindowScrollBarX(element),\n y: y\n };\n}","import getDocumentElement from \"./getDocumentElement.js\";\nimport getComputedStyle from \"./getComputedStyle.js\";\nimport getWindowScrollBarX from \"./getWindowScrollBarX.js\";\nimport getWindowScroll from \"./getWindowScroll.js\";\nimport { max } from \"../utils/math.js\"; // Gets the entire size of the scrollable document area, even extending outside\n// of the `` and `` rect bounds if horizontally scrollable\n\nexport default function getDocumentRect(element) {\n var _element$ownerDocumen;\n\n var html = getDocumentElement(element);\n var winScroll = getWindowScroll(element);\n var body = (_element$ownerDocumen = element.ownerDocument) == null ? void 0 : _element$ownerDocumen.body;\n var width = max(html.scrollWidth, html.clientWidth, body ? body.scrollWidth : 0, body ? body.clientWidth : 0);\n var height = max(html.scrollHeight, html.clientHeight, body ? body.scrollHeight : 0, body ? body.clientHeight : 0);\n var x = -winScroll.scrollLeft + getWindowScrollBarX(element);\n var y = -winScroll.scrollTop;\n\n if (getComputedStyle(body || html).direction === 'rtl') {\n x += max(html.clientWidth, body ? body.clientWidth : 0) - width;\n }\n\n return {\n width: width,\n height: height,\n x: x,\n y: y\n };\n}","import getBasePlacement from \"./getBasePlacement.js\";\nimport getVariation from \"./getVariation.js\";\nimport getMainAxisFromPlacement from \"./getMainAxisFromPlacement.js\";\nimport { top, right, bottom, left, start, end } from \"../enums.js\";\nexport default function computeOffsets(_ref) {\n var reference = _ref.reference,\n element = _ref.element,\n placement = _ref.placement;\n var basePlacement = placement ? getBasePlacement(placement) : null;\n var variation = placement ? 
getVariation(placement) : null;\n var commonX = reference.x + reference.width / 2 - element.width / 2;\n var commonY = reference.y + reference.height / 2 - element.height / 2;\n var offsets;\n\n switch (basePlacement) {\n case top:\n offsets = {\n x: commonX,\n y: reference.y - element.height\n };\n break;\n\n case bottom:\n offsets = {\n x: commonX,\n y: reference.y + reference.height\n };\n break;\n\n case right:\n offsets = {\n x: reference.x + reference.width,\n y: commonY\n };\n break;\n\n case left:\n offsets = {\n x: reference.x - element.width,\n y: commonY\n };\n break;\n\n default:\n offsets = {\n x: reference.x,\n y: reference.y\n };\n }\n\n var mainAxis = basePlacement ? getMainAxisFromPlacement(basePlacement) : null;\n\n if (mainAxis != null) {\n var len = mainAxis === 'y' ? 'height' : 'width';\n\n switch (variation) {\n case start:\n offsets[mainAxis] = offsets[mainAxis] - (reference[len] / 2 - element[len] / 2);\n break;\n\n case end:\n offsets[mainAxis] = offsets[mainAxis] + (reference[len] / 2 - element[len] / 2);\n break;\n\n default:\n }\n }\n\n return offsets;\n}","import getClippingRect from \"../dom-utils/getClippingRect.js\";\nimport getDocumentElement from \"../dom-utils/getDocumentElement.js\";\nimport getBoundingClientRect from \"../dom-utils/getBoundingClientRect.js\";\nimport computeOffsets from \"./computeOffsets.js\";\nimport rectToClientRect from \"./rectToClientRect.js\";\nimport { clippingParents, reference, popper, bottom, top, right, basePlacements, viewport } from \"../enums.js\";\nimport { isElement } from \"../dom-utils/instanceOf.js\";\nimport mergePaddingObject from \"./mergePaddingObject.js\";\nimport expandToHashMap from \"./expandToHashMap.js\"; // eslint-disable-next-line import/no-unused-modules\n\nexport default function detectOverflow(state, options) {\n if (options === void 0) {\n options = {};\n }\n\n var _options = options,\n _options$placement = _options.placement,\n placement = _options$placement === void 0 ? state.placement : _options$placement,\n _options$strategy = _options.strategy,\n strategy = _options$strategy === void 0 ? state.strategy : _options$strategy,\n _options$boundary = _options.boundary,\n boundary = _options$boundary === void 0 ? clippingParents : _options$boundary,\n _options$rootBoundary = _options.rootBoundary,\n rootBoundary = _options$rootBoundary === void 0 ? viewport : _options$rootBoundary,\n _options$elementConte = _options.elementContext,\n elementContext = _options$elementConte === void 0 ? popper : _options$elementConte,\n _options$altBoundary = _options.altBoundary,\n altBoundary = _options$altBoundary === void 0 ? false : _options$altBoundary,\n _options$padding = _options.padding,\n padding = _options$padding === void 0 ? 0 : _options$padding;\n var paddingObject = mergePaddingObject(typeof padding !== 'number' ? padding : expandToHashMap(padding, basePlacements));\n var altContext = elementContext === popper ? reference : popper;\n var popperRect = state.rects.popper;\n var element = state.elements[altBoundary ? altContext : elementContext];\n var clippingClientRect = getClippingRect(isElement(element) ? 
element : element.contextElement || getDocumentElement(state.elements.popper), boundary, rootBoundary, strategy);\n var referenceClientRect = getBoundingClientRect(state.elements.reference);\n var popperOffsets = computeOffsets({\n reference: referenceClientRect,\n element: popperRect,\n strategy: 'absolute',\n placement: placement\n });\n var popperClientRect = rectToClientRect(Object.assign({}, popperRect, popperOffsets));\n var elementClientRect = elementContext === popper ? popperClientRect : referenceClientRect; // positive = overflowing the clipping rect\n // 0 or negative = within the clipping rect\n\n var overflowOffsets = {\n top: clippingClientRect.top - elementClientRect.top + paddingObject.top,\n bottom: elementClientRect.bottom - clippingClientRect.bottom + paddingObject.bottom,\n left: clippingClientRect.left - elementClientRect.left + paddingObject.left,\n right: elementClientRect.right - clippingClientRect.right + paddingObject.right\n };\n var offsetData = state.modifiersData.offset; // Offsets can be applied only to the popper element\n\n if (elementContext === popper && offsetData) {\n var offset = offsetData[placement];\n Object.keys(overflowOffsets).forEach(function (key) {\n var multiply = [right, bottom].indexOf(key) >= 0 ? 1 : -1;\n var axis = [top, bottom].indexOf(key) >= 0 ? 'y' : 'x';\n overflowOffsets[key] += offset[axis] * multiply;\n });\n }\n\n return overflowOffsets;\n}","import getOppositePlacement from \"../utils/getOppositePlacement.js\";\nimport getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getOppositeVariationPlacement from \"../utils/getOppositeVariationPlacement.js\";\nimport detectOverflow from \"../utils/detectOverflow.js\";\nimport computeAutoPlacement from \"../utils/computeAutoPlacement.js\";\nimport { bottom, top, start, right, left, auto } from \"../enums.js\";\nimport getVariation from \"../utils/getVariation.js\"; // eslint-disable-next-line import/no-unused-modules\n\nfunction getExpandedFallbackPlacements(placement) {\n if (getBasePlacement(placement) === auto) {\n return [];\n }\n\n var oppositePlacement = getOppositePlacement(placement);\n return [getOppositeVariationPlacement(placement), oppositePlacement, getOppositeVariationPlacement(oppositePlacement)];\n}\n\nfunction flip(_ref) {\n var state = _ref.state,\n options = _ref.options,\n name = _ref.name;\n\n if (state.modifiersData[name]._skip) {\n return;\n }\n\n var _options$mainAxis = options.mainAxis,\n checkMainAxis = _options$mainAxis === void 0 ? true : _options$mainAxis,\n _options$altAxis = options.altAxis,\n checkAltAxis = _options$altAxis === void 0 ? true : _options$altAxis,\n specifiedFallbackPlacements = options.fallbackPlacements,\n padding = options.padding,\n boundary = options.boundary,\n rootBoundary = options.rootBoundary,\n altBoundary = options.altBoundary,\n _options$flipVariatio = options.flipVariations,\n flipVariations = _options$flipVariatio === void 0 ? true : _options$flipVariatio,\n allowedAutoPlacements = options.allowedAutoPlacements;\n var preferredPlacement = state.options.placement;\n var basePlacement = getBasePlacement(preferredPlacement);\n var isBasePlacement = basePlacement === preferredPlacement;\n var fallbackPlacements = specifiedFallbackPlacements || (isBasePlacement || !flipVariations ? 
[getOppositePlacement(preferredPlacement)] : getExpandedFallbackPlacements(preferredPlacement));\n var placements = [preferredPlacement].concat(fallbackPlacements).reduce(function (acc, placement) {\n return acc.concat(getBasePlacement(placement) === auto ? computeAutoPlacement(state, {\n placement: placement,\n boundary: boundary,\n rootBoundary: rootBoundary,\n padding: padding,\n flipVariations: flipVariations,\n allowedAutoPlacements: allowedAutoPlacements\n }) : placement);\n }, []);\n var referenceRect = state.rects.reference;\n var popperRect = state.rects.popper;\n var checksMap = new Map();\n var makeFallbackChecks = true;\n var firstFittingPlacement = placements[0];\n\n for (var i = 0; i < placements.length; i++) {\n var placement = placements[i];\n\n var _basePlacement = getBasePlacement(placement);\n\n var isStartVariation = getVariation(placement) === start;\n var isVertical = [top, bottom].indexOf(_basePlacement) >= 0;\n var len = isVertical ? 'width' : 'height';\n var overflow = detectOverflow(state, {\n placement: placement,\n boundary: boundary,\n rootBoundary: rootBoundary,\n altBoundary: altBoundary,\n padding: padding\n });\n var mainVariationSide = isVertical ? isStartVariation ? right : left : isStartVariation ? bottom : top;\n\n if (referenceRect[len] > popperRect[len]) {\n mainVariationSide = getOppositePlacement(mainVariationSide);\n }\n\n var altVariationSide = getOppositePlacement(mainVariationSide);\n var checks = [];\n\n if (checkMainAxis) {\n checks.push(overflow[_basePlacement] <= 0);\n }\n\n if (checkAltAxis) {\n checks.push(overflow[mainVariationSide] <= 0, overflow[altVariationSide] <= 0);\n }\n\n if (checks.every(function (check) {\n return check;\n })) {\n firstFittingPlacement = placement;\n makeFallbackChecks = false;\n break;\n }\n\n checksMap.set(placement, checks);\n }\n\n if (makeFallbackChecks) {\n // `2` may be desired in some cases – research later\n var numberOfChecks = flipVariations ? 3 : 1;\n\n var _loop = function _loop(_i) {\n var fittingPlacement = placements.find(function (placement) {\n var checks = checksMap.get(placement);\n\n if (checks) {\n return checks.slice(0, _i).every(function (check) {\n return check;\n });\n }\n });\n\n if (fittingPlacement) {\n firstFittingPlacement = fittingPlacement;\n return \"break\";\n }\n };\n\n for (var _i = numberOfChecks; _i > 0; _i--) {\n var _ret = _loop(_i);\n\n if (_ret === \"break\") break;\n }\n }\n\n if (state.placement !== firstFittingPlacement) {\n state.modifiersData[name]._skip = true;\n state.placement = firstFittingPlacement;\n state.reset = true;\n }\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'flip',\n enabled: true,\n phase: 'main',\n fn: flip,\n requiresIfExists: ['offset'],\n data: {\n _skip: false\n }\n};","import getVariation from \"./getVariation.js\";\nimport { variationPlacements, basePlacements, placements as allPlacements } from \"../enums.js\";\nimport detectOverflow from \"./detectOverflow.js\";\nimport getBasePlacement from \"./getBasePlacement.js\";\nexport default function computeAutoPlacement(state, options) {\n if (options === void 0) {\n options = {};\n }\n\n var _options = options,\n placement = _options.placement,\n boundary = _options.boundary,\n rootBoundary = _options.rootBoundary,\n padding = _options.padding,\n flipVariations = _options.flipVariations,\n _options$allowedAutoP = _options.allowedAutoPlacements,\n allowedAutoPlacements = _options$allowedAutoP === void 0 ? 
allPlacements : _options$allowedAutoP;\n var variation = getVariation(placement);\n var placements = variation ? flipVariations ? variationPlacements : variationPlacements.filter(function (placement) {\n return getVariation(placement) === variation;\n }) : basePlacements;\n var allowedPlacements = placements.filter(function (placement) {\n return allowedAutoPlacements.indexOf(placement) >= 0;\n });\n\n if (allowedPlacements.length === 0) {\n allowedPlacements = placements;\n } // $FlowFixMe[incompatible-type]: Flow seems to have problems with two array unions...\n\n\n var overflows = allowedPlacements.reduce(function (acc, placement) {\n acc[placement] = detectOverflow(state, {\n placement: placement,\n boundary: boundary,\n rootBoundary: rootBoundary,\n padding: padding\n })[getBasePlacement(placement)];\n return acc;\n }, {});\n return Object.keys(overflows).sort(function (a, b) {\n return overflows[a] - overflows[b];\n });\n}","import { top, bottom, left, right } from \"../enums.js\";\nimport detectOverflow from \"../utils/detectOverflow.js\";\n\nfunction getSideOffsets(overflow, rect, preventedOffsets) {\n if (preventedOffsets === void 0) {\n preventedOffsets = {\n x: 0,\n y: 0\n };\n }\n\n return {\n top: overflow.top - rect.height - preventedOffsets.y,\n right: overflow.right - rect.width + preventedOffsets.x,\n bottom: overflow.bottom - rect.height + preventedOffsets.y,\n left: overflow.left - rect.width - preventedOffsets.x\n };\n}\n\nfunction isAnySideFullyClipped(overflow) {\n return [top, right, bottom, left].some(function (side) {\n return overflow[side] >= 0;\n });\n}\n\nfunction hide(_ref) {\n var state = _ref.state,\n name = _ref.name;\n var referenceRect = state.rects.reference;\n var popperRect = state.rects.popper;\n var preventedOffsets = state.modifiersData.preventOverflow;\n var referenceOverflow = detectOverflow(state, {\n elementContext: 'reference'\n });\n var popperAltOverflow = detectOverflow(state, {\n altBoundary: true\n });\n var referenceClippingOffsets = getSideOffsets(referenceOverflow, referenceRect);\n var popperEscapeOffsets = getSideOffsets(popperAltOverflow, popperRect, preventedOffsets);\n var isReferenceHidden = isAnySideFullyClipped(referenceClippingOffsets);\n var hasPopperEscaped = isAnySideFullyClipped(popperEscapeOffsets);\n state.modifiersData[name] = {\n referenceClippingOffsets: referenceClippingOffsets,\n popperEscapeOffsets: popperEscapeOffsets,\n isReferenceHidden: isReferenceHidden,\n hasPopperEscaped: hasPopperEscaped\n };\n state.attributes.popper = Object.assign({}, state.attributes.popper, {\n 'data-popper-reference-hidden': isReferenceHidden,\n 'data-popper-escaped': hasPopperEscaped\n });\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'hide',\n enabled: true,\n phase: 'main',\n requiresIfExists: ['preventOverflow'],\n fn: hide\n};","import getBasePlacement from \"../utils/getBasePlacement.js\";\nimport { top, left, right, placements } from \"../enums.js\"; // eslint-disable-next-line import/no-unused-modules\n\nexport function distanceAndSkiddingToXY(placement, rects, offset) {\n var basePlacement = getBasePlacement(placement);\n var invertDistance = [left, top].indexOf(basePlacement) >= 0 ? -1 : 1;\n\n var _ref = typeof offset === 'function' ? offset(Object.assign({}, rects, {\n placement: placement\n })) : offset,\n skidding = _ref[0],\n distance = _ref[1];\n\n skidding = skidding || 0;\n distance = (distance || 0) * invertDistance;\n return [left, right].indexOf(basePlacement) >= 0 ? 
{\n x: distance,\n y: skidding\n } : {\n x: skidding,\n y: distance\n };\n}\n\nfunction offset(_ref2) {\n var state = _ref2.state,\n options = _ref2.options,\n name = _ref2.name;\n var _options$offset = options.offset,\n offset = _options$offset === void 0 ? [0, 0] : _options$offset;\n var data = placements.reduce(function (acc, placement) {\n acc[placement] = distanceAndSkiddingToXY(placement, state.rects, offset);\n return acc;\n }, {});\n var _data$state$placement = data[state.placement],\n x = _data$state$placement.x,\n y = _data$state$placement.y;\n\n if (state.modifiersData.popperOffsets != null) {\n state.modifiersData.popperOffsets.x += x;\n state.modifiersData.popperOffsets.y += y;\n }\n\n state.modifiersData[name] = data;\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'offset',\n enabled: true,\n phase: 'main',\n requires: ['popperOffsets'],\n fn: offset\n};","import computeOffsets from \"../utils/computeOffsets.js\";\n\nfunction popperOffsets(_ref) {\n var state = _ref.state,\n name = _ref.name;\n // Offsets are the actual position the popper needs to have to be\n // properly positioned near its reference element\n // This is the most basic placement, and will be adjusted by\n // the modifiers in the next step\n state.modifiersData[name] = computeOffsets({\n reference: state.rects.reference,\n element: state.rects.popper,\n strategy: 'absolute',\n placement: state.placement\n });\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'popperOffsets',\n enabled: true,\n phase: 'read',\n fn: popperOffsets,\n data: {}\n};","import { top, left, right, bottom, start } from \"../enums.js\";\nimport getBasePlacement from \"../utils/getBasePlacement.js\";\nimport getMainAxisFromPlacement from \"../utils/getMainAxisFromPlacement.js\";\nimport getAltAxis from \"../utils/getAltAxis.js\";\nimport { within, withinMaxClamp } from \"../utils/within.js\";\nimport getLayoutRect from \"../dom-utils/getLayoutRect.js\";\nimport getOffsetParent from \"../dom-utils/getOffsetParent.js\";\nimport detectOverflow from \"../utils/detectOverflow.js\";\nimport getVariation from \"../utils/getVariation.js\";\nimport getFreshSideObject from \"../utils/getFreshSideObject.js\";\nimport { min as mathMin, max as mathMax } from \"../utils/math.js\";\n\nfunction preventOverflow(_ref) {\n var state = _ref.state,\n options = _ref.options,\n name = _ref.name;\n var _options$mainAxis = options.mainAxis,\n checkMainAxis = _options$mainAxis === void 0 ? true : _options$mainAxis,\n _options$altAxis = options.altAxis,\n checkAltAxis = _options$altAxis === void 0 ? false : _options$altAxis,\n boundary = options.boundary,\n rootBoundary = options.rootBoundary,\n altBoundary = options.altBoundary,\n padding = options.padding,\n _options$tether = options.tether,\n tether = _options$tether === void 0 ? true : _options$tether,\n _options$tetherOffset = options.tetherOffset,\n tetherOffset = _options$tetherOffset === void 0 ? 
0 : _options$tetherOffset;\n var overflow = detectOverflow(state, {\n boundary: boundary,\n rootBoundary: rootBoundary,\n padding: padding,\n altBoundary: altBoundary\n });\n var basePlacement = getBasePlacement(state.placement);\n var variation = getVariation(state.placement);\n var isBasePlacement = !variation;\n var mainAxis = getMainAxisFromPlacement(basePlacement);\n var altAxis = getAltAxis(mainAxis);\n var popperOffsets = state.modifiersData.popperOffsets;\n var referenceRect = state.rects.reference;\n var popperRect = state.rects.popper;\n var tetherOffsetValue = typeof tetherOffset === 'function' ? tetherOffset(Object.assign({}, state.rects, {\n placement: state.placement\n })) : tetherOffset;\n var normalizedTetherOffsetValue = typeof tetherOffsetValue === 'number' ? {\n mainAxis: tetherOffsetValue,\n altAxis: tetherOffsetValue\n } : Object.assign({\n mainAxis: 0,\n altAxis: 0\n }, tetherOffsetValue);\n var offsetModifierState = state.modifiersData.offset ? state.modifiersData.offset[state.placement] : null;\n var data = {\n x: 0,\n y: 0\n };\n\n if (!popperOffsets) {\n return;\n }\n\n if (checkMainAxis) {\n var _offsetModifierState$;\n\n var mainSide = mainAxis === 'y' ? top : left;\n var altSide = mainAxis === 'y' ? bottom : right;\n var len = mainAxis === 'y' ? 'height' : 'width';\n var offset = popperOffsets[mainAxis];\n var min = offset + overflow[mainSide];\n var max = offset - overflow[altSide];\n var additive = tether ? -popperRect[len] / 2 : 0;\n var minLen = variation === start ? referenceRect[len] : popperRect[len];\n var maxLen = variation === start ? -popperRect[len] : -referenceRect[len]; // We need to include the arrow in the calculation so the arrow doesn't go\n // outside the reference bounds\n\n var arrowElement = state.elements.arrow;\n var arrowRect = tether && arrowElement ? getLayoutRect(arrowElement) : {\n width: 0,\n height: 0\n };\n var arrowPaddingObject = state.modifiersData['arrow#persistent'] ? state.modifiersData['arrow#persistent'].padding : getFreshSideObject();\n var arrowPaddingMin = arrowPaddingObject[mainSide];\n var arrowPaddingMax = arrowPaddingObject[altSide]; // If the reference length is smaller than the arrow length, we don't want\n // to include its full size in the calculation. If the reference is small\n // and near the edge of a boundary, the popper can overflow even if the\n // reference is not overflowing as well (e.g. virtual elements with no\n // width or height)\n\n var arrowLen = within(0, referenceRect[len], arrowRect[len]);\n var minOffset = isBasePlacement ? referenceRect[len] / 2 - additive - arrowLen - arrowPaddingMin - normalizedTetherOffsetValue.mainAxis : minLen - arrowLen - arrowPaddingMin - normalizedTetherOffsetValue.mainAxis;\n var maxOffset = isBasePlacement ? -referenceRect[len] / 2 + additive + arrowLen + arrowPaddingMax + normalizedTetherOffsetValue.mainAxis : maxLen + arrowLen + arrowPaddingMax + normalizedTetherOffsetValue.mainAxis;\n var arrowOffsetParent = state.elements.arrow && getOffsetParent(state.elements.arrow);\n var clientOffset = arrowOffsetParent ? mainAxis === 'y' ? arrowOffsetParent.clientTop || 0 : arrowOffsetParent.clientLeft || 0 : 0;\n var offsetModifierValue = (_offsetModifierState$ = offsetModifierState == null ? void 0 : offsetModifierState[mainAxis]) != null ? _offsetModifierState$ : 0;\n var tetherMin = offset + minOffset - offsetModifierValue - clientOffset;\n var tetherMax = offset + maxOffset - offsetModifierValue;\n var preventedOffset = within(tether ? 
mathMin(min, tetherMin) : min, offset, tether ? mathMax(max, tetherMax) : max);\n popperOffsets[mainAxis] = preventedOffset;\n data[mainAxis] = preventedOffset - offset;\n }\n\n if (checkAltAxis) {\n var _offsetModifierState$2;\n\n var _mainSide = mainAxis === 'x' ? top : left;\n\n var _altSide = mainAxis === 'x' ? bottom : right;\n\n var _offset = popperOffsets[altAxis];\n\n var _len = altAxis === 'y' ? 'height' : 'width';\n\n var _min = _offset + overflow[_mainSide];\n\n var _max = _offset - overflow[_altSide];\n\n var isOriginSide = [top, left].indexOf(basePlacement) !== -1;\n\n var _offsetModifierValue = (_offsetModifierState$2 = offsetModifierState == null ? void 0 : offsetModifierState[altAxis]) != null ? _offsetModifierState$2 : 0;\n\n var _tetherMin = isOriginSide ? _min : _offset - referenceRect[_len] - popperRect[_len] - _offsetModifierValue + normalizedTetherOffsetValue.altAxis;\n\n var _tetherMax = isOriginSide ? _offset + referenceRect[_len] + popperRect[_len] - _offsetModifierValue - normalizedTetherOffsetValue.altAxis : _max;\n\n var _preventedOffset = tether && isOriginSide ? withinMaxClamp(_tetherMin, _offset, _tetherMax) : within(tether ? _tetherMin : _min, _offset, tether ? _tetherMax : _max);\n\n popperOffsets[altAxis] = _preventedOffset;\n data[altAxis] = _preventedOffset - _offset;\n }\n\n state.modifiersData[name] = data;\n} // eslint-disable-next-line import/no-unused-modules\n\n\nexport default {\n name: 'preventOverflow',\n enabled: true,\n phase: 'main',\n fn: preventOverflow,\n requiresIfExists: ['offset']\n};","export default function getAltAxis(axis) {\n return axis === 'x' ? 'y' : 'x';\n}","import getBoundingClientRect from \"./getBoundingClientRect.js\";\nimport getNodeScroll from \"./getNodeScroll.js\";\nimport getNodeName from \"./getNodeName.js\";\nimport { isHTMLElement } from \"./instanceOf.js\";\nimport getWindowScrollBarX from \"./getWindowScrollBarX.js\";\nimport getDocumentElement from \"./getDocumentElement.js\";\nimport isScrollParent from \"./isScrollParent.js\";\nimport { round } from \"../utils/math.js\";\n\nfunction isElementScaled(element) {\n var rect = element.getBoundingClientRect();\n var scaleX = round(rect.width) / element.offsetWidth || 1;\n var scaleY = round(rect.height) / element.offsetHeight || 1;\n return scaleX !== 1 || scaleY !== 1;\n} // Returns the composite rect of an element relative to its offsetParent.\n// Composite means it takes into account transforms as well as layout.\n\n\nexport default function getCompositeRect(elementOrVirtualElement, offsetParent, isFixed) {\n if (isFixed === void 0) {\n isFixed = false;\n }\n\n var isOffsetParentAnElement = isHTMLElement(offsetParent);\n var offsetParentIsScaled = isHTMLElement(offsetParent) && isElementScaled(offsetParent);\n var documentElement = getDocumentElement(offsetParent);\n var rect = getBoundingClientRect(elementOrVirtualElement, offsetParentIsScaled, isFixed);\n var scroll = {\n scrollLeft: 0,\n scrollTop: 0\n };\n var offsets = {\n x: 0,\n y: 0\n };\n\n if (isOffsetParentAnElement || !isOffsetParentAnElement && !isFixed) {\n if (getNodeName(offsetParent) !== 'body' || // https://github.com/popperjs/popper-core/issues/1078\n isScrollParent(documentElement)) {\n scroll = getNodeScroll(offsetParent);\n }\n\n if (isHTMLElement(offsetParent)) {\n offsets = getBoundingClientRect(offsetParent, true);\n offsets.x += offsetParent.clientLeft;\n offsets.y += offsetParent.clientTop;\n } else if (documentElement) {\n offsets.x = getWindowScrollBarX(documentElement);\n }\n }\n\n 
return {\n x: rect.left + scroll.scrollLeft - offsets.x,\n y: rect.top + scroll.scrollTop - offsets.y,\n width: rect.width,\n height: rect.height\n };\n}","import getWindowScroll from \"./getWindowScroll.js\";\nimport getWindow from \"./getWindow.js\";\nimport { isHTMLElement } from \"./instanceOf.js\";\nimport getHTMLElementScroll from \"./getHTMLElementScroll.js\";\nexport default function getNodeScroll(node) {\n if (node === getWindow(node) || !isHTMLElement(node)) {\n return getWindowScroll(node);\n } else {\n return getHTMLElementScroll(node);\n }\n}","export default function getHTMLElementScroll(element) {\n return {\n scrollLeft: element.scrollLeft,\n scrollTop: element.scrollTop\n };\n}","import { modifierPhases } from \"../enums.js\"; // source: https://stackoverflow.com/questions/49875255\n\nfunction order(modifiers) {\n var map = new Map();\n var visited = new Set();\n var result = [];\n modifiers.forEach(function (modifier) {\n map.set(modifier.name, modifier);\n }); // On visiting object, check for its dependencies and visit them recursively\n\n function sort(modifier) {\n visited.add(modifier.name);\n var requires = [].concat(modifier.requires || [], modifier.requiresIfExists || []);\n requires.forEach(function (dep) {\n if (!visited.has(dep)) {\n var depModifier = map.get(dep);\n\n if (depModifier) {\n sort(depModifier);\n }\n }\n });\n result.push(modifier);\n }\n\n modifiers.forEach(function (modifier) {\n if (!visited.has(modifier.name)) {\n // check for visited object\n sort(modifier);\n }\n });\n return result;\n}\n\nexport default function orderModifiers(modifiers) {\n // order based on dependencies\n var orderedModifiers = order(modifiers); // order based on phase\n\n return modifierPhases.reduce(function (acc, phase) {\n return acc.concat(orderedModifiers.filter(function (modifier) {\n return modifier.phase === phase;\n }));\n }, []);\n}","import getCompositeRect from \"./dom-utils/getCompositeRect.js\";\nimport getLayoutRect from \"./dom-utils/getLayoutRect.js\";\nimport listScrollParents from \"./dom-utils/listScrollParents.js\";\nimport getOffsetParent from \"./dom-utils/getOffsetParent.js\";\nimport orderModifiers from \"./utils/orderModifiers.js\";\nimport debounce from \"./utils/debounce.js\";\nimport mergeByName from \"./utils/mergeByName.js\";\nimport detectOverflow from \"./utils/detectOverflow.js\";\nimport { isElement } from \"./dom-utils/instanceOf.js\";\nvar DEFAULT_OPTIONS = {\n placement: 'bottom',\n modifiers: [],\n strategy: 'absolute'\n};\n\nfunction areValidElements() {\n for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) {\n args[_key] = arguments[_key];\n }\n\n return !args.some(function (element) {\n return !(element && typeof element.getBoundingClientRect === 'function');\n });\n}\n\nexport function popperGenerator(generatorOptions) {\n if (generatorOptions === void 0) {\n generatorOptions = {};\n }\n\n var _generatorOptions = generatorOptions,\n _generatorOptions$def = _generatorOptions.defaultModifiers,\n defaultModifiers = _generatorOptions$def === void 0 ? [] : _generatorOptions$def,\n _generatorOptions$def2 = _generatorOptions.defaultOptions,\n defaultOptions = _generatorOptions$def2 === void 0 ? 
DEFAULT_OPTIONS : _generatorOptions$def2;\n return function createPopper(reference, popper, options) {\n if (options === void 0) {\n options = defaultOptions;\n }\n\n var state = {\n placement: 'bottom',\n orderedModifiers: [],\n options: Object.assign({}, DEFAULT_OPTIONS, defaultOptions),\n modifiersData: {},\n elements: {\n reference: reference,\n popper: popper\n },\n attributes: {},\n styles: {}\n };\n var effectCleanupFns = [];\n var isDestroyed = false;\n var instance = {\n state: state,\n setOptions: function setOptions(setOptionsAction) {\n var options = typeof setOptionsAction === 'function' ? setOptionsAction(state.options) : setOptionsAction;\n cleanupModifierEffects();\n state.options = Object.assign({}, defaultOptions, state.options, options);\n state.scrollParents = {\n reference: isElement(reference) ? listScrollParents(reference) : reference.contextElement ? listScrollParents(reference.contextElement) : [],\n popper: listScrollParents(popper)\n }; // Orders the modifiers based on their dependencies and `phase`\n // properties\n\n var orderedModifiers = orderModifiers(mergeByName([].concat(defaultModifiers, state.options.modifiers))); // Strip out disabled modifiers\n\n state.orderedModifiers = orderedModifiers.filter(function (m) {\n return m.enabled;\n });\n runModifierEffects();\n return instance.update();\n },\n // Sync update – it will always be executed, even if not necessary. This\n // is useful for low frequency updates where sync behavior simplifies the\n // logic.\n // For high frequency updates (e.g. `resize` and `scroll` events), always\n // prefer the async Popper#update method\n forceUpdate: function forceUpdate() {\n if (isDestroyed) {\n return;\n }\n\n var _state$elements = state.elements,\n reference = _state$elements.reference,\n popper = _state$elements.popper; // Don't proceed if `reference` or `popper` are not valid elements\n // anymore\n\n if (!areValidElements(reference, popper)) {\n return;\n } // Store the reference and popper rects to be read by modifiers\n\n\n state.rects = {\n reference: getCompositeRect(reference, getOffsetParent(popper), state.options.strategy === 'fixed'),\n popper: getLayoutRect(popper)\n }; // Modifiers have the ability to reset the current update cycle. The\n // most common use case for this is the `flip` modifier changing the\n // placement, which then needs to re-run all the modifiers, because the\n // logic was previously ran for the previous placement and is therefore\n // stale/incorrect\n\n state.reset = false;\n state.placement = state.options.placement; // On each update cycle, the `modifiersData` property for each modifier\n // is filled with the initial data specified by the modifier. This means\n // it doesn't persist and is fresh on each update.\n // To ensure persistent data, use `${name}#persistent`\n\n state.orderedModifiers.forEach(function (modifier) {\n return state.modifiersData[modifier.name] = Object.assign({}, modifier.data);\n });\n\n for (var index = 0; index < state.orderedModifiers.length; index++) {\n if (state.reset === true) {\n state.reset = false;\n index = -1;\n continue;\n }\n\n var _state$orderedModifie = state.orderedModifiers[index],\n fn = _state$orderedModifie.fn,\n _state$orderedModifie2 = _state$orderedModifie.options,\n _options = _state$orderedModifie2 === void 0 ? 
{} : _state$orderedModifie2,\n name = _state$orderedModifie.name;\n\n if (typeof fn === 'function') {\n state = fn({\n state: state,\n options: _options,\n name: name,\n instance: instance\n }) || state;\n }\n }\n },\n // Async and optimistically optimized update – it will not be executed if\n // not necessary (debounced to run at most once-per-tick)\n update: debounce(function () {\n return new Promise(function (resolve) {\n instance.forceUpdate();\n resolve(state);\n });\n }),\n destroy: function destroy() {\n cleanupModifierEffects();\n isDestroyed = true;\n }\n };\n\n if (!areValidElements(reference, popper)) {\n return instance;\n }\n\n instance.setOptions(options).then(function (state) {\n if (!isDestroyed && options.onFirstUpdate) {\n options.onFirstUpdate(state);\n }\n }); // Modifiers have the ability to execute arbitrary code before the first\n // update cycle runs. They will be executed in the same order as the update\n // cycle. This is useful when a modifier adds some persistent data that\n // other modifiers need to use, but the modifier is run after the dependent\n // one.\n\n function runModifierEffects() {\n state.orderedModifiers.forEach(function (_ref) {\n var name = _ref.name,\n _ref$options = _ref.options,\n options = _ref$options === void 0 ? {} : _ref$options,\n effect = _ref.effect;\n\n if (typeof effect === 'function') {\n var cleanupFn = effect({\n state: state,\n name: name,\n instance: instance,\n options: options\n });\n\n var noopFn = function noopFn() {};\n\n effectCleanupFns.push(cleanupFn || noopFn);\n }\n });\n }\n\n function cleanupModifierEffects() {\n effectCleanupFns.forEach(function (fn) {\n return fn();\n });\n effectCleanupFns = [];\n }\n\n return instance;\n };\n}\nexport var createPopper = /*#__PURE__*/popperGenerator(); // eslint-disable-next-line import/no-unused-modules\n\nexport { detectOverflow };","export default function debounce(fn) {\n var pending;\n return function () {\n if (!pending) {\n pending = new Promise(function (resolve) {\n Promise.resolve().then(function () {\n pending = undefined;\n resolve(fn());\n });\n });\n }\n\n return pending;\n };\n}","export default function mergeByName(modifiers) {\n var merged = modifiers.reduce(function (merged, current) {\n var existing = merged[current.name];\n merged[current.name] = existing ? 
Object.assign({}, existing, current, {\n options: Object.assign({}, existing.options, current.options),\n data: Object.assign({}, existing.data, current.data)\n }) : current;\n return merged;\n }, {}); // IE11 does not support Object.values\n\n return Object.keys(merged).map(function (key) {\n return merged[key];\n });\n}","import { popperGenerator, detectOverflow } from \"./createPopper.js\";\nimport eventListeners from \"./modifiers/eventListeners.js\";\nimport popperOffsets from \"./modifiers/popperOffsets.js\";\nimport computeStyles from \"./modifiers/computeStyles.js\";\nimport applyStyles from \"./modifiers/applyStyles.js\";\nimport offset from \"./modifiers/offset.js\";\nimport flip from \"./modifiers/flip.js\";\nimport preventOverflow from \"./modifiers/preventOverflow.js\";\nimport arrow from \"./modifiers/arrow.js\";\nimport hide from \"./modifiers/hide.js\";\nvar defaultModifiers = [eventListeners, popperOffsets, computeStyles, applyStyles, offset, flip, preventOverflow, arrow, hide];\nvar createPopper = /*#__PURE__*/popperGenerator({\n defaultModifiers: defaultModifiers\n}); // eslint-disable-next-line import/no-unused-modules\n\nexport { createPopper, popperGenerator, defaultModifiers, detectOverflow }; // eslint-disable-next-line import/no-unused-modules\n\nexport { createPopper as createPopperLite } from \"./popper-lite.js\"; // eslint-disable-next-line import/no-unused-modules\n\nexport * from \"./modifiers/index.js\";","import { popperGenerator, detectOverflow } from \"./createPopper.js\";\nimport eventListeners from \"./modifiers/eventListeners.js\";\nimport popperOffsets from \"./modifiers/popperOffsets.js\";\nimport computeStyles from \"./modifiers/computeStyles.js\";\nimport applyStyles from \"./modifiers/applyStyles.js\";\nvar defaultModifiers = [eventListeners, popperOffsets, computeStyles, applyStyles];\nvar createPopper = /*#__PURE__*/popperGenerator({\n defaultModifiers: defaultModifiers\n}); // eslint-disable-next-line import/no-unused-modules\n\nexport { createPopper, popperGenerator, defaultModifiers, detectOverflow };","/*!\n * Bootstrap v5.3.2 (https://getbootstrap.com/)\n * Copyright 2011-2023 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors)\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n */\nimport * as Popper from '@popperjs/core';\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dom/data.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n/**\n * Constants\n */\n\nconst elementMap = new Map();\nconst Data = {\n set(element, key, instance) {\n if (!elementMap.has(element)) {\n elementMap.set(element, new Map());\n }\n const instanceMap = elementMap.get(element);\n\n // make it clear we only want one instance per element\n // can be removed later when multiple key/instances are fine to be used\n if (!instanceMap.has(key) && instanceMap.size !== 0) {\n // eslint-disable-next-line no-console\n console.error(`Bootstrap doesn't allow more than one instance per element. 
Bound instance: ${Array.from(instanceMap.keys())[0]}.`);\n return;\n }\n instanceMap.set(key, instance);\n },\n get(element, key) {\n if (elementMap.has(element)) {\n return elementMap.get(element).get(key) || null;\n }\n return null;\n },\n remove(element, key) {\n if (!elementMap.has(element)) {\n return;\n }\n const instanceMap = elementMap.get(element);\n instanceMap.delete(key);\n\n // free up element references if there are no instances left for an element\n if (instanceMap.size === 0) {\n elementMap.delete(element);\n }\n }\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/index.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\nconst MAX_UID = 1000000;\nconst MILLISECONDS_MULTIPLIER = 1000;\nconst TRANSITION_END = 'transitionend';\n\n/**\n * Properly escape IDs selectors to handle weird IDs\n * @param {string} selector\n * @returns {string}\n */\nconst parseSelector = selector => {\n if (selector && window.CSS && window.CSS.escape) {\n // document.querySelector needs escaping to handle IDs (html5+) containing for instance /\n selector = selector.replace(/#([^\\s\"#']+)/g, (match, id) => `#${CSS.escape(id)}`);\n }\n return selector;\n};\n\n// Shout-out Angus Croll (https://goo.gl/pxwQGp)\nconst toType = object => {\n if (object === null || object === undefined) {\n return `${object}`;\n }\n return Object.prototype.toString.call(object).match(/\\s([a-z]+)/i)[1].toLowerCase();\n};\n\n/**\n * Public Util API\n */\n\nconst getUID = prefix => {\n do {\n prefix += Math.floor(Math.random() * MAX_UID);\n } while (document.getElementById(prefix));\n return prefix;\n};\nconst getTransitionDurationFromElement = element => {\n if (!element) {\n return 0;\n }\n\n // Get transition-duration of the element\n let {\n transitionDuration,\n transitionDelay\n } = window.getComputedStyle(element);\n const floatTransitionDuration = Number.parseFloat(transitionDuration);\n const floatTransitionDelay = Number.parseFloat(transitionDelay);\n\n // Return 0 if element or transition duration is not found\n if (!floatTransitionDuration && !floatTransitionDelay) {\n return 0;\n }\n\n // If multiple durations are defined, take the first\n transitionDuration = transitionDuration.split(',')[0];\n transitionDelay = transitionDelay.split(',')[0];\n return (Number.parseFloat(transitionDuration) + Number.parseFloat(transitionDelay)) * MILLISECONDS_MULTIPLIER;\n};\nconst triggerTransitionEnd = element => {\n element.dispatchEvent(new Event(TRANSITION_END));\n};\nconst isElement = object => {\n if (!object || typeof object !== 'object') {\n return false;\n }\n if (typeof object.jquery !== 'undefined') {\n object = object[0];\n }\n return typeof object.nodeType !== 'undefined';\n};\nconst getElement = object => {\n // it's a jQuery object or a node element\n if (isElement(object)) {\n return object.jquery ? 
object[0] : object;\n }\n if (typeof object === 'string' && object.length > 0) {\n return document.querySelector(parseSelector(object));\n }\n return null;\n};\nconst isVisible = element => {\n if (!isElement(element) || element.getClientRects().length === 0) {\n return false;\n }\n const elementIsVisible = getComputedStyle(element).getPropertyValue('visibility') === 'visible';\n // Handle `details` element as its content may falsie appear visible when it is closed\n const closedDetails = element.closest('details:not([open])');\n if (!closedDetails) {\n return elementIsVisible;\n }\n if (closedDetails !== element) {\n const summary = element.closest('summary');\n if (summary && summary.parentNode !== closedDetails) {\n return false;\n }\n if (summary === null) {\n return false;\n }\n }\n return elementIsVisible;\n};\nconst isDisabled = element => {\n if (!element || element.nodeType !== Node.ELEMENT_NODE) {\n return true;\n }\n if (element.classList.contains('disabled')) {\n return true;\n }\n if (typeof element.disabled !== 'undefined') {\n return element.disabled;\n }\n return element.hasAttribute('disabled') && element.getAttribute('disabled') !== 'false';\n};\nconst findShadowRoot = element => {\n if (!document.documentElement.attachShadow) {\n return null;\n }\n\n // Can find the shadow root otherwise it'll return the document\n if (typeof element.getRootNode === 'function') {\n const root = element.getRootNode();\n return root instanceof ShadowRoot ? root : null;\n }\n if (element instanceof ShadowRoot) {\n return element;\n }\n\n // when we don't find a shadow root\n if (!element.parentNode) {\n return null;\n }\n return findShadowRoot(element.parentNode);\n};\nconst noop = () => {};\n\n/**\n * Trick to restart an element's animation\n *\n * @param {HTMLElement} element\n * @return void\n *\n * @see https://www.charistheo.io/blog/2021/02/restart-a-css-animation-with-javascript/#restarting-a-css-animation\n */\nconst reflow = element => {\n element.offsetHeight; // eslint-disable-line no-unused-expressions\n};\n\nconst getjQuery = () => {\n if (window.jQuery && !document.body.hasAttribute('data-bs-no-jquery')) {\n return window.jQuery;\n }\n return null;\n};\nconst DOMContentLoadedCallbacks = [];\nconst onDOMContentLoaded = callback => {\n if (document.readyState === 'loading') {\n // add listener on the first call when the document is in loading state\n if (!DOMContentLoadedCallbacks.length) {\n document.addEventListener('DOMContentLoaded', () => {\n for (const callback of DOMContentLoadedCallbacks) {\n callback();\n }\n });\n }\n DOMContentLoadedCallbacks.push(callback);\n } else {\n callback();\n }\n};\nconst isRTL = () => document.documentElement.dir === 'rtl';\nconst defineJQueryPlugin = plugin => {\n onDOMContentLoaded(() => {\n const $ = getjQuery();\n /* istanbul ignore if */\n if ($) {\n const name = plugin.NAME;\n const JQUERY_NO_CONFLICT = $.fn[name];\n $.fn[name] = plugin.jQueryInterface;\n $.fn[name].Constructor = plugin;\n $.fn[name].noConflict = () => {\n $.fn[name] = JQUERY_NO_CONFLICT;\n return plugin.jQueryInterface;\n };\n }\n });\n};\nconst execute = (possibleCallback, args = [], defaultValue = possibleCallback) => {\n return typeof possibleCallback === 'function' ? 
possibleCallback(...args) : defaultValue;\n};\nconst executeAfterTransition = (callback, transitionElement, waitForTransition = true) => {\n if (!waitForTransition) {\n execute(callback);\n return;\n }\n const durationPadding = 5;\n const emulatedDuration = getTransitionDurationFromElement(transitionElement) + durationPadding;\n let called = false;\n const handler = ({\n target\n }) => {\n if (target !== transitionElement) {\n return;\n }\n called = true;\n transitionElement.removeEventListener(TRANSITION_END, handler);\n execute(callback);\n };\n transitionElement.addEventListener(TRANSITION_END, handler);\n setTimeout(() => {\n if (!called) {\n triggerTransitionEnd(transitionElement);\n }\n }, emulatedDuration);\n};\n\n/**\n * Return the previous/next element of a list.\n *\n * @param {array} list The list of elements\n * @param activeElement The active element\n * @param shouldGetNext Choose to get next or previous element\n * @param isCycleAllowed\n * @return {Element|elem} The proper element\n */\nconst getNextActiveElement = (list, activeElement, shouldGetNext, isCycleAllowed) => {\n const listLength = list.length;\n let index = list.indexOf(activeElement);\n\n // if the element does not exist in the list return an element\n // depending on the direction and if cycle is allowed\n if (index === -1) {\n return !shouldGetNext && isCycleAllowed ? list[listLength - 1] : list[0];\n }\n index += shouldGetNext ? 1 : -1;\n if (isCycleAllowed) {\n index = (index + listLength) % listLength;\n }\n return list[Math.max(0, Math.min(index, listLength - 1))];\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dom/event-handler.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst namespaceRegex = /[^.]*(?=\\..*)\\.|.*/;\nconst stripNameRegex = /\\..*/;\nconst stripUidRegex = /::\\d+$/;\nconst eventRegistry = {}; // Events storage\nlet uidEvent = 1;\nconst customEvents = {\n mouseenter: 'mouseover',\n mouseleave: 'mouseout'\n};\nconst nativeEvents = new Set(['click', 'dblclick', 'mouseup', 'mousedown', 'contextmenu', 'mousewheel', 'DOMMouseScroll', 'mouseover', 'mouseout', 'mousemove', 'selectstart', 'selectend', 'keydown', 'keypress', 'keyup', 'orientationchange', 'touchstart', 'touchmove', 'touchend', 'touchcancel', 'pointerdown', 'pointermove', 'pointerup', 'pointerleave', 'pointercancel', 'gesturestart', 'gesturechange', 'gestureend', 'focus', 'blur', 'change', 'reset', 'select', 'submit', 'focusin', 'focusout', 'load', 'unload', 'beforeunload', 'resize', 'move', 'DOMContentLoaded', 'readystatechange', 'error', 'abort', 'scroll']);\n\n/**\n * Private methods\n */\n\nfunction makeEventUid(element, uid) {\n return uid && `${uid}::${uidEvent++}` || element.uidEvent || uidEvent++;\n}\nfunction getElementEvents(element) {\n const uid = makeEventUid(element);\n element.uidEvent = uid;\n eventRegistry[uid] = eventRegistry[uid] || {};\n return eventRegistry[uid];\n}\nfunction bootstrapHandler(element, fn) {\n return function handler(event) {\n hydrateObj(event, {\n delegateTarget: element\n });\n if (handler.oneOff) {\n EventHandler.off(element, event.type, fn);\n }\n return fn.apply(element, [event]);\n };\n}\nfunction bootstrapDelegationHandler(element, selector, fn) {\n return function handler(event) {\n const domElements = element.querySelectorAll(selector);\n for (let {\n target\n } = event; target && 
target !== this; target = target.parentNode) {\n for (const domElement of domElements) {\n if (domElement !== target) {\n continue;\n }\n hydrateObj(event, {\n delegateTarget: target\n });\n if (handler.oneOff) {\n EventHandler.off(element, event.type, selector, fn);\n }\n return fn.apply(target, [event]);\n }\n }\n };\n}\nfunction findHandler(events, callable, delegationSelector = null) {\n return Object.values(events).find(event => event.callable === callable && event.delegationSelector === delegationSelector);\n}\nfunction normalizeParameters(originalTypeEvent, handler, delegationFunction) {\n const isDelegated = typeof handler === 'string';\n // TODO: tooltip passes `false` instead of selector, so we need to check\n const callable = isDelegated ? delegationFunction : handler || delegationFunction;\n let typeEvent = getTypeEvent(originalTypeEvent);\n if (!nativeEvents.has(typeEvent)) {\n typeEvent = originalTypeEvent;\n }\n return [isDelegated, callable, typeEvent];\n}\nfunction addHandler(element, originalTypeEvent, handler, delegationFunction, oneOff) {\n if (typeof originalTypeEvent !== 'string' || !element) {\n return;\n }\n let [isDelegated, callable, typeEvent] = normalizeParameters(originalTypeEvent, handler, delegationFunction);\n\n // in case of mouseenter or mouseleave wrap the handler within a function that checks for its DOM position\n // this prevents the handler from being dispatched the same way as mouseover or mouseout does\n if (originalTypeEvent in customEvents) {\n const wrapFunction = fn => {\n return function (event) {\n if (!event.relatedTarget || event.relatedTarget !== event.delegateTarget && !event.delegateTarget.contains(event.relatedTarget)) {\n return fn.call(this, event);\n }\n };\n };\n callable = wrapFunction(callable);\n }\n const events = getElementEvents(element);\n const handlers = events[typeEvent] || (events[typeEvent] = {});\n const previousFunction = findHandler(handlers, callable, isDelegated ? handler : null);\n if (previousFunction) {\n previousFunction.oneOff = previousFunction.oneOff && oneOff;\n return;\n }\n const uid = makeEventUid(callable, originalTypeEvent.replace(namespaceRegex, ''));\n const fn = isDelegated ? bootstrapDelegationHandler(element, handler, callable) : bootstrapHandler(element, callable);\n fn.delegationSelector = isDelegated ? 
handler : null;\n fn.callable = callable;\n fn.oneOff = oneOff;\n fn.uidEvent = uid;\n handlers[uid] = fn;\n element.addEventListener(typeEvent, fn, isDelegated);\n}\nfunction removeHandler(element, events, typeEvent, handler, delegationSelector) {\n const fn = findHandler(events[typeEvent], handler, delegationSelector);\n if (!fn) {\n return;\n }\n element.removeEventListener(typeEvent, fn, Boolean(delegationSelector));\n delete events[typeEvent][fn.uidEvent];\n}\nfunction removeNamespacedHandlers(element, events, typeEvent, namespace) {\n const storeElementEvent = events[typeEvent] || {};\n for (const [handlerKey, event] of Object.entries(storeElementEvent)) {\n if (handlerKey.includes(namespace)) {\n removeHandler(element, events, typeEvent, event.callable, event.delegationSelector);\n }\n }\n}\nfunction getTypeEvent(event) {\n // allow to get the native events from namespaced events ('click.bs.button' --> 'click')\n event = event.replace(stripNameRegex, '');\n return customEvents[event] || event;\n}\nconst EventHandler = {\n on(element, event, handler, delegationFunction) {\n addHandler(element, event, handler, delegationFunction, false);\n },\n one(element, event, handler, delegationFunction) {\n addHandler(element, event, handler, delegationFunction, true);\n },\n off(element, originalTypeEvent, handler, delegationFunction) {\n if (typeof originalTypeEvent !== 'string' || !element) {\n return;\n }\n const [isDelegated, callable, typeEvent] = normalizeParameters(originalTypeEvent, handler, delegationFunction);\n const inNamespace = typeEvent !== originalTypeEvent;\n const events = getElementEvents(element);\n const storeElementEvent = events[typeEvent] || {};\n const isNamespace = originalTypeEvent.startsWith('.');\n if (typeof callable !== 'undefined') {\n // Simplest case: handler is passed, remove that listener ONLY.\n if (!Object.keys(storeElementEvent).length) {\n return;\n }\n removeHandler(element, events, typeEvent, callable, isDelegated ? 
handler : null);\n return;\n }\n if (isNamespace) {\n for (const elementEvent of Object.keys(events)) {\n removeNamespacedHandlers(element, events, elementEvent, originalTypeEvent.slice(1));\n }\n }\n for (const [keyHandlers, event] of Object.entries(storeElementEvent)) {\n const handlerKey = keyHandlers.replace(stripUidRegex, '');\n if (!inNamespace || originalTypeEvent.includes(handlerKey)) {\n removeHandler(element, events, typeEvent, event.callable, event.delegationSelector);\n }\n }\n },\n trigger(element, event, args) {\n if (typeof event !== 'string' || !element) {\n return null;\n }\n const $ = getjQuery();\n const typeEvent = getTypeEvent(event);\n const inNamespace = event !== typeEvent;\n let jQueryEvent = null;\n let bubbles = true;\n let nativeDispatch = true;\n let defaultPrevented = false;\n if (inNamespace && $) {\n jQueryEvent = $.Event(event, args);\n $(element).trigger(jQueryEvent);\n bubbles = !jQueryEvent.isPropagationStopped();\n nativeDispatch = !jQueryEvent.isImmediatePropagationStopped();\n defaultPrevented = jQueryEvent.isDefaultPrevented();\n }\n const evt = hydrateObj(new Event(event, {\n bubbles,\n cancelable: true\n }), args);\n if (defaultPrevented) {\n evt.preventDefault();\n }\n if (nativeDispatch) {\n element.dispatchEvent(evt);\n }\n if (evt.defaultPrevented && jQueryEvent) {\n jQueryEvent.preventDefault();\n }\n return evt;\n }\n};\nfunction hydrateObj(obj, meta = {}) {\n for (const [key, value] of Object.entries(meta)) {\n try {\n obj[key] = value;\n } catch (_unused) {\n Object.defineProperty(obj, key, {\n configurable: true,\n get() {\n return value;\n }\n });\n }\n }\n return obj;\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dom/manipulator.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\nfunction normalizeData(value) {\n if (value === 'true') {\n return true;\n }\n if (value === 'false') {\n return false;\n }\n if (value === Number(value).toString()) {\n return Number(value);\n }\n if (value === '' || value === 'null') {\n return null;\n }\n if (typeof value !== 'string') {\n return value;\n }\n try {\n return JSON.parse(decodeURIComponent(value));\n } catch (_unused) {\n return value;\n }\n}\nfunction normalizeDataKey(key) {\n return key.replace(/[A-Z]/g, chr => `-${chr.toLowerCase()}`);\n}\nconst Manipulator = {\n setDataAttribute(element, key, value) {\n element.setAttribute(`data-bs-${normalizeDataKey(key)}`, value);\n },\n removeDataAttribute(element, key) {\n element.removeAttribute(`data-bs-${normalizeDataKey(key)}`);\n },\n getDataAttributes(element) {\n if (!element) {\n return {};\n }\n const attributes = {};\n const bsKeys = Object.keys(element.dataset).filter(key => key.startsWith('bs') && !key.startsWith('bsConfig'));\n for (const key of bsKeys) {\n let pureKey = key.replace(/^bs/, '');\n pureKey = pureKey.charAt(0).toLowerCase() + pureKey.slice(1, pureKey.length);\n attributes[pureKey] = normalizeData(element.dataset[key]);\n }\n return attributes;\n },\n getDataAttribute(element, key) {\n return normalizeData(element.getAttribute(`data-bs-${normalizeDataKey(key)}`));\n }\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/config.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * 
Class definition\n */\n\nclass Config {\n // Getters\n static get Default() {\n return {};\n }\n static get DefaultType() {\n return {};\n }\n static get NAME() {\n throw new Error('You have to implement the static method \"NAME\", for each component!');\n }\n _getConfig(config) {\n config = this._mergeConfigObj(config);\n config = this._configAfterMerge(config);\n this._typeCheckConfig(config);\n return config;\n }\n _configAfterMerge(config) {\n return config;\n }\n _mergeConfigObj(config, element) {\n const jsonConfig = isElement(element) ? Manipulator.getDataAttribute(element, 'config') : {}; // try to parse\n\n return {\n ...this.constructor.Default,\n ...(typeof jsonConfig === 'object' ? jsonConfig : {}),\n ...(isElement(element) ? Manipulator.getDataAttributes(element) : {}),\n ...(typeof config === 'object' ? config : {})\n };\n }\n _typeCheckConfig(config, configTypes = this.constructor.DefaultType) {\n for (const [property, expectedTypes] of Object.entries(configTypes)) {\n const value = config[property];\n const valueType = isElement(value) ? 'element' : toType(value);\n if (!new RegExp(expectedTypes).test(valueType)) {\n throw new TypeError(`${this.constructor.NAME.toUpperCase()}: Option \"${property}\" provided type \"${valueType}\" but expected type \"${expectedTypes}\".`);\n }\n }\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap base-component.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst VERSION = '5.3.2';\n\n/**\n * Class definition\n */\n\nclass BaseComponent extends Config {\n constructor(element, config) {\n super();\n element = getElement(element);\n if (!element) {\n return;\n }\n this._element = element;\n this._config = this._getConfig(config);\n Data.set(this._element, this.constructor.DATA_KEY, this);\n }\n\n // Public\n dispose() {\n Data.remove(this._element, this.constructor.DATA_KEY);\n EventHandler.off(this._element, this.constructor.EVENT_KEY);\n for (const propertyName of Object.getOwnPropertyNames(this)) {\n this[propertyName] = null;\n }\n }\n _queueCallback(callback, element, isAnimated = true) {\n executeAfterTransition(callback, element, isAnimated);\n }\n _getConfig(config) {\n config = this._mergeConfigObj(config, this._element);\n config = this._configAfterMerge(config);\n this._typeCheckConfig(config);\n return config;\n }\n\n // Static\n static getInstance(element) {\n return Data.get(getElement(element), this.DATA_KEY);\n }\n static getOrCreateInstance(element, config = {}) {\n return this.getInstance(element) || new this(element, typeof config === 'object' ? 
config : null);\n }\n static get VERSION() {\n return VERSION;\n }\n static get DATA_KEY() {\n return `bs.${this.NAME}`;\n }\n static get EVENT_KEY() {\n return `.${this.DATA_KEY}`;\n }\n static eventName(name) {\n return `${name}${this.EVENT_KEY}`;\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dom/selector-engine.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\nconst getSelector = element => {\n let selector = element.getAttribute('data-bs-target');\n if (!selector || selector === '#') {\n let hrefAttribute = element.getAttribute('href');\n\n // The only valid content that could double as a selector are IDs or classes,\n // so everything starting with `#` or `.`. If a \"real\" URL is used as the selector,\n // `document.querySelector` will rightfully complain it is invalid.\n // See https://github.com/twbs/bootstrap/issues/32273\n if (!hrefAttribute || !hrefAttribute.includes('#') && !hrefAttribute.startsWith('.')) {\n return null;\n }\n\n // Just in case some CMS puts out a full URL with the anchor appended\n if (hrefAttribute.includes('#') && !hrefAttribute.startsWith('#')) {\n hrefAttribute = `#${hrefAttribute.split('#')[1]}`;\n }\n selector = hrefAttribute && hrefAttribute !== '#' ? parseSelector(hrefAttribute.trim()) : null;\n }\n return selector;\n};\nconst SelectorEngine = {\n find(selector, element = document.documentElement) {\n return [].concat(...Element.prototype.querySelectorAll.call(element, selector));\n },\n findOne(selector, element = document.documentElement) {\n return Element.prototype.querySelector.call(element, selector);\n },\n children(element, selector) {\n return [].concat(...element.children).filter(child => child.matches(selector));\n },\n parents(element, selector) {\n const parents = [];\n let ancestor = element.parentNode.closest(selector);\n while (ancestor) {\n parents.push(ancestor);\n ancestor = ancestor.parentNode.closest(selector);\n }\n return parents;\n },\n prev(element, selector) {\n let previous = element.previousElementSibling;\n while (previous) {\n if (previous.matches(selector)) {\n return [previous];\n }\n previous = previous.previousElementSibling;\n }\n return [];\n },\n // TODO: this is now unused; remove later along with prev()\n next(element, selector) {\n let next = element.nextElementSibling;\n while (next) {\n if (next.matches(selector)) {\n return [next];\n }\n next = next.nextElementSibling;\n }\n return [];\n },\n focusableChildren(element) {\n const focusables = ['a', 'button', 'input', 'textarea', 'select', 'details', '[tabindex]', '[contenteditable=\"true\"]'].map(selector => `${selector}:not([tabindex^=\"-\"])`).join(',');\n return this.find(focusables, element).filter(el => !isDisabled(el) && isVisible(el));\n },\n getSelectorFromElement(element) {\n const selector = getSelector(element);\n if (selector) {\n return SelectorEngine.findOne(selector) ? selector : null;\n }\n return null;\n },\n getElementFromSelector(element) {\n const selector = getSelector(element);\n return selector ? SelectorEngine.findOne(selector) : null;\n },\n getMultipleElementsFromSelector(element) {\n const selector = getSelector(element);\n return selector ? 
SelectorEngine.find(selector) : [];\n }\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/component-functions.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\nconst enableDismissTrigger = (component, method = 'hide') => {\n const clickEvent = `click.dismiss${component.EVENT_KEY}`;\n const name = component.NAME;\n EventHandler.on(document, clickEvent, `[data-bs-dismiss=\"${name}\"]`, function (event) {\n if (['A', 'AREA'].includes(this.tagName)) {\n event.preventDefault();\n }\n if (isDisabled(this)) {\n return;\n }\n const target = SelectorEngine.getElementFromSelector(this) || this.closest(`.${name}`);\n const instance = component.getOrCreateInstance(target);\n\n // Method argument is left, for Alert and only, as it doesn't implement the 'hide' method\n instance[method]();\n });\n};\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap alert.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$f = 'alert';\nconst DATA_KEY$a = 'bs.alert';\nconst EVENT_KEY$b = `.${DATA_KEY$a}`;\nconst EVENT_CLOSE = `close${EVENT_KEY$b}`;\nconst EVENT_CLOSED = `closed${EVENT_KEY$b}`;\nconst CLASS_NAME_FADE$5 = 'fade';\nconst CLASS_NAME_SHOW$8 = 'show';\n\n/**\n * Class definition\n */\n\nclass Alert extends BaseComponent {\n // Getters\n static get NAME() {\n return NAME$f;\n }\n\n // Public\n close() {\n const closeEvent = EventHandler.trigger(this._element, EVENT_CLOSE);\n if (closeEvent.defaultPrevented) {\n return;\n }\n this._element.classList.remove(CLASS_NAME_SHOW$8);\n const isAnimated = this._element.classList.contains(CLASS_NAME_FADE$5);\n this._queueCallback(() => this._destroyElement(), this._element, isAnimated);\n }\n\n // Private\n _destroyElement() {\n this._element.remove();\n EventHandler.trigger(this._element, EVENT_CLOSED);\n this.dispose();\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Alert.getOrCreateInstance(this);\n if (typeof config !== 'string') {\n return;\n }\n if (data[config] === undefined || config.startsWith('_') || config === 'constructor') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config](this);\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nenableDismissTrigger(Alert, 'close');\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Alert);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap button.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$e = 'button';\nconst DATA_KEY$9 = 'bs.button';\nconst EVENT_KEY$a = `.${DATA_KEY$9}`;\nconst DATA_API_KEY$6 = '.data-api';\nconst CLASS_NAME_ACTIVE$3 = 'active';\nconst SELECTOR_DATA_TOGGLE$5 = '[data-bs-toggle=\"button\"]';\nconst EVENT_CLICK_DATA_API$6 = `click${EVENT_KEY$a}${DATA_API_KEY$6}`;\n\n/**\n * Class definition\n */\n\nclass Button extends BaseComponent {\n // Getters\n static get NAME() {\n return NAME$e;\n }\n\n // Public\n toggle() {\n // Toggle class and sync the `aria-pressed` attribute with the return value of the `.toggle()` method\n 
this._element.setAttribute('aria-pressed', this._element.classList.toggle(CLASS_NAME_ACTIVE$3));\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Button.getOrCreateInstance(this);\n if (config === 'toggle') {\n data[config]();\n }\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$6, SELECTOR_DATA_TOGGLE$5, event => {\n event.preventDefault();\n const button = event.target.closest(SELECTOR_DATA_TOGGLE$5);\n const data = Button.getOrCreateInstance(button);\n data.toggle();\n});\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Button);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/swipe.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$d = 'swipe';\nconst EVENT_KEY$9 = '.bs.swipe';\nconst EVENT_TOUCHSTART = `touchstart${EVENT_KEY$9}`;\nconst EVENT_TOUCHMOVE = `touchmove${EVENT_KEY$9}`;\nconst EVENT_TOUCHEND = `touchend${EVENT_KEY$9}`;\nconst EVENT_POINTERDOWN = `pointerdown${EVENT_KEY$9}`;\nconst EVENT_POINTERUP = `pointerup${EVENT_KEY$9}`;\nconst POINTER_TYPE_TOUCH = 'touch';\nconst POINTER_TYPE_PEN = 'pen';\nconst CLASS_NAME_POINTER_EVENT = 'pointer-event';\nconst SWIPE_THRESHOLD = 40;\nconst Default$c = {\n endCallback: null,\n leftCallback: null,\n rightCallback: null\n};\nconst DefaultType$c = {\n endCallback: '(function|null)',\n leftCallback: '(function|null)',\n rightCallback: '(function|null)'\n};\n\n/**\n * Class definition\n */\n\nclass Swipe extends Config {\n constructor(element, config) {\n super();\n this._element = element;\n if (!element || !Swipe.isSupported()) {\n return;\n }\n this._config = this._getConfig(config);\n this._deltaX = 0;\n this._supportPointerEvents = Boolean(window.PointerEvent);\n this._initEvents();\n }\n\n // Getters\n static get Default() {\n return Default$c;\n }\n static get DefaultType() {\n return DefaultType$c;\n }\n static get NAME() {\n return NAME$d;\n }\n\n // Public\n dispose() {\n EventHandler.off(this._element, EVENT_KEY$9);\n }\n\n // Private\n _start(event) {\n if (!this._supportPointerEvents) {\n this._deltaX = event.touches[0].clientX;\n return;\n }\n if (this._eventIsPointerPenTouch(event)) {\n this._deltaX = event.clientX;\n }\n }\n _end(event) {\n if (this._eventIsPointerPenTouch(event)) {\n this._deltaX = event.clientX - this._deltaX;\n }\n this._handleSwipe();\n execute(this._config.endCallback);\n }\n _move(event) {\n this._deltaX = event.touches && event.touches.length > 1 ? 0 : event.touches[0].clientX - this._deltaX;\n }\n _handleSwipe() {\n const absDeltaX = Math.abs(this._deltaX);\n if (absDeltaX <= SWIPE_THRESHOLD) {\n return;\n }\n const direction = absDeltaX / this._deltaX;\n this._deltaX = 0;\n if (!direction) {\n return;\n }\n execute(direction > 0 ? 
this._config.rightCallback : this._config.leftCallback);\n }\n _initEvents() {\n if (this._supportPointerEvents) {\n EventHandler.on(this._element, EVENT_POINTERDOWN, event => this._start(event));\n EventHandler.on(this._element, EVENT_POINTERUP, event => this._end(event));\n this._element.classList.add(CLASS_NAME_POINTER_EVENT);\n } else {\n EventHandler.on(this._element, EVENT_TOUCHSTART, event => this._start(event));\n EventHandler.on(this._element, EVENT_TOUCHMOVE, event => this._move(event));\n EventHandler.on(this._element, EVENT_TOUCHEND, event => this._end(event));\n }\n }\n _eventIsPointerPenTouch(event) {\n return this._supportPointerEvents && (event.pointerType === POINTER_TYPE_PEN || event.pointerType === POINTER_TYPE_TOUCH);\n }\n\n // Static\n static isSupported() {\n return 'ontouchstart' in document.documentElement || navigator.maxTouchPoints > 0;\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap carousel.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$c = 'carousel';\nconst DATA_KEY$8 = 'bs.carousel';\nconst EVENT_KEY$8 = `.${DATA_KEY$8}`;\nconst DATA_API_KEY$5 = '.data-api';\nconst ARROW_LEFT_KEY$1 = 'ArrowLeft';\nconst ARROW_RIGHT_KEY$1 = 'ArrowRight';\nconst TOUCHEVENT_COMPAT_WAIT = 500; // Time for mouse compat events to fire after touch\n\nconst ORDER_NEXT = 'next';\nconst ORDER_PREV = 'prev';\nconst DIRECTION_LEFT = 'left';\nconst DIRECTION_RIGHT = 'right';\nconst EVENT_SLIDE = `slide${EVENT_KEY$8}`;\nconst EVENT_SLID = `slid${EVENT_KEY$8}`;\nconst EVENT_KEYDOWN$1 = `keydown${EVENT_KEY$8}`;\nconst EVENT_MOUSEENTER$1 = `mouseenter${EVENT_KEY$8}`;\nconst EVENT_MOUSELEAVE$1 = `mouseleave${EVENT_KEY$8}`;\nconst EVENT_DRAG_START = `dragstart${EVENT_KEY$8}`;\nconst EVENT_LOAD_DATA_API$3 = `load${EVENT_KEY$8}${DATA_API_KEY$5}`;\nconst EVENT_CLICK_DATA_API$5 = `click${EVENT_KEY$8}${DATA_API_KEY$5}`;\nconst CLASS_NAME_CAROUSEL = 'carousel';\nconst CLASS_NAME_ACTIVE$2 = 'active';\nconst CLASS_NAME_SLIDE = 'slide';\nconst CLASS_NAME_END = 'carousel-item-end';\nconst CLASS_NAME_START = 'carousel-item-start';\nconst CLASS_NAME_NEXT = 'carousel-item-next';\nconst CLASS_NAME_PREV = 'carousel-item-prev';\nconst SELECTOR_ACTIVE = '.active';\nconst SELECTOR_ITEM = '.carousel-item';\nconst SELECTOR_ACTIVE_ITEM = SELECTOR_ACTIVE + SELECTOR_ITEM;\nconst SELECTOR_ITEM_IMG = '.carousel-item img';\nconst SELECTOR_INDICATORS = '.carousel-indicators';\nconst SELECTOR_DATA_SLIDE = '[data-bs-slide], [data-bs-slide-to]';\nconst SELECTOR_DATA_RIDE = '[data-bs-ride=\"carousel\"]';\nconst KEY_TO_DIRECTION = {\n [ARROW_LEFT_KEY$1]: DIRECTION_RIGHT,\n [ARROW_RIGHT_KEY$1]: DIRECTION_LEFT\n};\nconst Default$b = {\n interval: 5000,\n keyboard: true,\n pause: 'hover',\n ride: false,\n touch: true,\n wrap: true\n};\nconst DefaultType$b = {\n interval: '(number|boolean)',\n // TODO:v6 remove boolean support\n keyboard: 'boolean',\n pause: '(string|boolean)',\n ride: '(boolean|string)',\n touch: 'boolean',\n wrap: 'boolean'\n};\n\n/**\n * Class definition\n */\n\nclass Carousel extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._interval = null;\n this._activeElement = null;\n this._isSliding = false;\n this.touchTimeout = null;\n this._swipeHelper = null;\n this._indicatorsElement = SelectorEngine.findOne(SELECTOR_INDICATORS, this._element);\n 
this._addEventListeners();\n if (this._config.ride === CLASS_NAME_CAROUSEL) {\n this.cycle();\n }\n }\n\n // Getters\n static get Default() {\n return Default$b;\n }\n static get DefaultType() {\n return DefaultType$b;\n }\n static get NAME() {\n return NAME$c;\n }\n\n // Public\n next() {\n this._slide(ORDER_NEXT);\n }\n nextWhenVisible() {\n // FIXME TODO use `document.visibilityState`\n // Don't call next when the page isn't visible\n // or the carousel or its parent isn't visible\n if (!document.hidden && isVisible(this._element)) {\n this.next();\n }\n }\n prev() {\n this._slide(ORDER_PREV);\n }\n pause() {\n if (this._isSliding) {\n triggerTransitionEnd(this._element);\n }\n this._clearInterval();\n }\n cycle() {\n this._clearInterval();\n this._updateInterval();\n this._interval = setInterval(() => this.nextWhenVisible(), this._config.interval);\n }\n _maybeEnableCycle() {\n if (!this._config.ride) {\n return;\n }\n if (this._isSliding) {\n EventHandler.one(this._element, EVENT_SLID, () => this.cycle());\n return;\n }\n this.cycle();\n }\n to(index) {\n const items = this._getItems();\n if (index > items.length - 1 || index < 0) {\n return;\n }\n if (this._isSliding) {\n EventHandler.one(this._element, EVENT_SLID, () => this.to(index));\n return;\n }\n const activeIndex = this._getItemIndex(this._getActive());\n if (activeIndex === index) {\n return;\n }\n const order = index > activeIndex ? ORDER_NEXT : ORDER_PREV;\n this._slide(order, items[index]);\n }\n dispose() {\n if (this._swipeHelper) {\n this._swipeHelper.dispose();\n }\n super.dispose();\n }\n\n // Private\n _configAfterMerge(config) {\n config.defaultInterval = config.interval;\n return config;\n }\n _addEventListeners() {\n if (this._config.keyboard) {\n EventHandler.on(this._element, EVENT_KEYDOWN$1, event => this._keydown(event));\n }\n if (this._config.pause === 'hover') {\n EventHandler.on(this._element, EVENT_MOUSEENTER$1, () => this.pause());\n EventHandler.on(this._element, EVENT_MOUSELEAVE$1, () => this._maybeEnableCycle());\n }\n if (this._config.touch && Swipe.isSupported()) {\n this._addTouchEventListeners();\n }\n }\n _addTouchEventListeners() {\n for (const img of SelectorEngine.find(SELECTOR_ITEM_IMG, this._element)) {\n EventHandler.on(img, EVENT_DRAG_START, event => event.preventDefault());\n }\n const endCallBack = () => {\n if (this._config.pause !== 'hover') {\n return;\n }\n\n // If it's a touch-enabled device, mouseenter/leave are fired as\n // part of the mouse compatibility events on first tap - the carousel\n // would stop cycling until user tapped out of it;\n // here, we listen for touchend, explicitly pause the carousel\n // (as if it's the second time we tap on it, mouseenter compat event\n // is NOT fired) and after a timeout (to allow for mouse compatibility\n // events to fire) we explicitly restart cycling\n\n this.pause();\n if (this.touchTimeout) {\n clearTimeout(this.touchTimeout);\n }\n this.touchTimeout = setTimeout(() => this._maybeEnableCycle(), TOUCHEVENT_COMPAT_WAIT + this._config.interval);\n };\n const swipeConfig = {\n leftCallback: () => this._slide(this._directionToOrder(DIRECTION_LEFT)),\n rightCallback: () => this._slide(this._directionToOrder(DIRECTION_RIGHT)),\n endCallback: endCallBack\n };\n this._swipeHelper = new Swipe(this._element, swipeConfig);\n }\n _keydown(event) {\n if (/input|textarea/i.test(event.target.tagName)) {\n return;\n }\n const direction = KEY_TO_DIRECTION[event.key];\n if (direction) {\n event.preventDefault();\n 
this._slide(this._directionToOrder(direction));\n }\n }\n _getItemIndex(element) {\n return this._getItems().indexOf(element);\n }\n _setActiveIndicatorElement(index) {\n if (!this._indicatorsElement) {\n return;\n }\n const activeIndicator = SelectorEngine.findOne(SELECTOR_ACTIVE, this._indicatorsElement);\n activeIndicator.classList.remove(CLASS_NAME_ACTIVE$2);\n activeIndicator.removeAttribute('aria-current');\n const newActiveIndicator = SelectorEngine.findOne(`[data-bs-slide-to=\"${index}\"]`, this._indicatorsElement);\n if (newActiveIndicator) {\n newActiveIndicator.classList.add(CLASS_NAME_ACTIVE$2);\n newActiveIndicator.setAttribute('aria-current', 'true');\n }\n }\n _updateInterval() {\n const element = this._activeElement || this._getActive();\n if (!element) {\n return;\n }\n const elementInterval = Number.parseInt(element.getAttribute('data-bs-interval'), 10);\n this._config.interval = elementInterval || this._config.defaultInterval;\n }\n _slide(order, element = null) {\n if (this._isSliding) {\n return;\n }\n const activeElement = this._getActive();\n const isNext = order === ORDER_NEXT;\n const nextElement = element || getNextActiveElement(this._getItems(), activeElement, isNext, this._config.wrap);\n if (nextElement === activeElement) {\n return;\n }\n const nextElementIndex = this._getItemIndex(nextElement);\n const triggerEvent = eventName => {\n return EventHandler.trigger(this._element, eventName, {\n relatedTarget: nextElement,\n direction: this._orderToDirection(order),\n from: this._getItemIndex(activeElement),\n to: nextElementIndex\n });\n };\n const slideEvent = triggerEvent(EVENT_SLIDE);\n if (slideEvent.defaultPrevented) {\n return;\n }\n if (!activeElement || !nextElement) {\n // Some weirdness is happening, so we bail\n // TODO: change tests that use empty divs to avoid this check\n return;\n }\n const isCycling = Boolean(this._interval);\n this.pause();\n this._isSliding = true;\n this._setActiveIndicatorElement(nextElementIndex);\n this._activeElement = nextElement;\n const directionalClassName = isNext ? CLASS_NAME_START : CLASS_NAME_END;\n const orderClassName = isNext ? CLASS_NAME_NEXT : CLASS_NAME_PREV;\n nextElement.classList.add(orderClassName);\n reflow(nextElement);\n activeElement.classList.add(directionalClassName);\n nextElement.classList.add(directionalClassName);\n const completeCallBack = () => {\n nextElement.classList.remove(directionalClassName, orderClassName);\n nextElement.classList.add(CLASS_NAME_ACTIVE$2);\n activeElement.classList.remove(CLASS_NAME_ACTIVE$2, orderClassName, directionalClassName);\n this._isSliding = false;\n triggerEvent(EVENT_SLID);\n };\n this._queueCallback(completeCallBack, activeElement, this._isAnimated());\n if (isCycling) {\n this.cycle();\n }\n }\n _isAnimated() {\n return this._element.classList.contains(CLASS_NAME_SLIDE);\n }\n _getActive() {\n return SelectorEngine.findOne(SELECTOR_ACTIVE_ITEM, this._element);\n }\n _getItems() {\n return SelectorEngine.find(SELECTOR_ITEM, this._element);\n }\n _clearInterval() {\n if (this._interval) {\n clearInterval(this._interval);\n this._interval = null;\n }\n }\n _directionToOrder(direction) {\n if (isRTL()) {\n return direction === DIRECTION_LEFT ? ORDER_PREV : ORDER_NEXT;\n }\n return direction === DIRECTION_LEFT ? ORDER_NEXT : ORDER_PREV;\n }\n _orderToDirection(order) {\n if (isRTL()) {\n return order === ORDER_PREV ? DIRECTION_LEFT : DIRECTION_RIGHT;\n }\n return order === ORDER_PREV ? 
DIRECTION_RIGHT : DIRECTION_LEFT;\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Carousel.getOrCreateInstance(this, config);\n if (typeof config === 'number') {\n data.to(config);\n return;\n }\n if (typeof config === 'string') {\n if (data[config] === undefined || config.startsWith('_') || config === 'constructor') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n }\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$5, SELECTOR_DATA_SLIDE, function (event) {\n const target = SelectorEngine.getElementFromSelector(this);\n if (!target || !target.classList.contains(CLASS_NAME_CAROUSEL)) {\n return;\n }\n event.preventDefault();\n const carousel = Carousel.getOrCreateInstance(target);\n const slideIndex = this.getAttribute('data-bs-slide-to');\n if (slideIndex) {\n carousel.to(slideIndex);\n carousel._maybeEnableCycle();\n return;\n }\n if (Manipulator.getDataAttribute(this, 'slide') === 'next') {\n carousel.next();\n carousel._maybeEnableCycle();\n return;\n }\n carousel.prev();\n carousel._maybeEnableCycle();\n});\nEventHandler.on(window, EVENT_LOAD_DATA_API$3, () => {\n const carousels = SelectorEngine.find(SELECTOR_DATA_RIDE);\n for (const carousel of carousels) {\n Carousel.getOrCreateInstance(carousel);\n }\n});\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Carousel);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap collapse.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$b = 'collapse';\nconst DATA_KEY$7 = 'bs.collapse';\nconst EVENT_KEY$7 = `.${DATA_KEY$7}`;\nconst DATA_API_KEY$4 = '.data-api';\nconst EVENT_SHOW$6 = `show${EVENT_KEY$7}`;\nconst EVENT_SHOWN$6 = `shown${EVENT_KEY$7}`;\nconst EVENT_HIDE$6 = `hide${EVENT_KEY$7}`;\nconst EVENT_HIDDEN$6 = `hidden${EVENT_KEY$7}`;\nconst EVENT_CLICK_DATA_API$4 = `click${EVENT_KEY$7}${DATA_API_KEY$4}`;\nconst CLASS_NAME_SHOW$7 = 'show';\nconst CLASS_NAME_COLLAPSE = 'collapse';\nconst CLASS_NAME_COLLAPSING = 'collapsing';\nconst CLASS_NAME_COLLAPSED = 'collapsed';\nconst CLASS_NAME_DEEPER_CHILDREN = `:scope .${CLASS_NAME_COLLAPSE} .${CLASS_NAME_COLLAPSE}`;\nconst CLASS_NAME_HORIZONTAL = 'collapse-horizontal';\nconst WIDTH = 'width';\nconst HEIGHT = 'height';\nconst SELECTOR_ACTIVES = '.collapse.show, .collapse.collapsing';\nconst SELECTOR_DATA_TOGGLE$4 = '[data-bs-toggle=\"collapse\"]';\nconst Default$a = {\n parent: null,\n toggle: true\n};\nconst DefaultType$a = {\n parent: '(null|element)',\n toggle: 'boolean'\n};\n\n/**\n * Class definition\n */\n\nclass Collapse extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._isTransitioning = false;\n this._triggerArray = [];\n const toggleList = SelectorEngine.find(SELECTOR_DATA_TOGGLE$4);\n for (const elem of toggleList) {\n const selector = SelectorEngine.getSelectorFromElement(elem);\n const filterElement = SelectorEngine.find(selector).filter(foundElement => foundElement === this._element);\n if (selector !== null && filterElement.length) {\n this._triggerArray.push(elem);\n }\n }\n this._initializeChildren();\n if (!this._config.parent) {\n this._addAriaAndCollapsedClass(this._triggerArray, this._isShown());\n }\n if (this._config.toggle) {\n this.toggle();\n }\n }\n\n // Getters\n static get Default() {\n return 
Default$a;\n }\n static get DefaultType() {\n return DefaultType$a;\n }\n static get NAME() {\n return NAME$b;\n }\n\n // Public\n toggle() {\n if (this._isShown()) {\n this.hide();\n } else {\n this.show();\n }\n }\n show() {\n if (this._isTransitioning || this._isShown()) {\n return;\n }\n let activeChildren = [];\n\n // find active children\n if (this._config.parent) {\n activeChildren = this._getFirstLevelChildren(SELECTOR_ACTIVES).filter(element => element !== this._element).map(element => Collapse.getOrCreateInstance(element, {\n toggle: false\n }));\n }\n if (activeChildren.length && activeChildren[0]._isTransitioning) {\n return;\n }\n const startEvent = EventHandler.trigger(this._element, EVENT_SHOW$6);\n if (startEvent.defaultPrevented) {\n return;\n }\n for (const activeInstance of activeChildren) {\n activeInstance.hide();\n }\n const dimension = this._getDimension();\n this._element.classList.remove(CLASS_NAME_COLLAPSE);\n this._element.classList.add(CLASS_NAME_COLLAPSING);\n this._element.style[dimension] = 0;\n this._addAriaAndCollapsedClass(this._triggerArray, true);\n this._isTransitioning = true;\n const complete = () => {\n this._isTransitioning = false;\n this._element.classList.remove(CLASS_NAME_COLLAPSING);\n this._element.classList.add(CLASS_NAME_COLLAPSE, CLASS_NAME_SHOW$7);\n this._element.style[dimension] = '';\n EventHandler.trigger(this._element, EVENT_SHOWN$6);\n };\n const capitalizedDimension = dimension[0].toUpperCase() + dimension.slice(1);\n const scrollSize = `scroll${capitalizedDimension}`;\n this._queueCallback(complete, this._element, true);\n this._element.style[dimension] = `${this._element[scrollSize]}px`;\n }\n hide() {\n if (this._isTransitioning || !this._isShown()) {\n return;\n }\n const startEvent = EventHandler.trigger(this._element, EVENT_HIDE$6);\n if (startEvent.defaultPrevented) {\n return;\n }\n const dimension = this._getDimension();\n this._element.style[dimension] = `${this._element.getBoundingClientRect()[dimension]}px`;\n reflow(this._element);\n this._element.classList.add(CLASS_NAME_COLLAPSING);\n this._element.classList.remove(CLASS_NAME_COLLAPSE, CLASS_NAME_SHOW$7);\n for (const trigger of this._triggerArray) {\n const element = SelectorEngine.getElementFromSelector(trigger);\n if (element && !this._isShown(element)) {\n this._addAriaAndCollapsedClass([trigger], false);\n }\n }\n this._isTransitioning = true;\n const complete = () => {\n this._isTransitioning = false;\n this._element.classList.remove(CLASS_NAME_COLLAPSING);\n this._element.classList.add(CLASS_NAME_COLLAPSE);\n EventHandler.trigger(this._element, EVENT_HIDDEN$6);\n };\n this._element.style[dimension] = '';\n this._queueCallback(complete, this._element, true);\n }\n _isShown(element = this._element) {\n return element.classList.contains(CLASS_NAME_SHOW$7);\n }\n\n // Private\n _configAfterMerge(config) {\n config.toggle = Boolean(config.toggle); // Coerce string values\n config.parent = getElement(config.parent);\n return config;\n }\n _getDimension() {\n return this._element.classList.contains(CLASS_NAME_HORIZONTAL) ? 
WIDTH : HEIGHT;\n }\n _initializeChildren() {\n if (!this._config.parent) {\n return;\n }\n const children = this._getFirstLevelChildren(SELECTOR_DATA_TOGGLE$4);\n for (const element of children) {\n const selected = SelectorEngine.getElementFromSelector(element);\n if (selected) {\n this._addAriaAndCollapsedClass([element], this._isShown(selected));\n }\n }\n }\n _getFirstLevelChildren(selector) {\n const children = SelectorEngine.find(CLASS_NAME_DEEPER_CHILDREN, this._config.parent);\n // remove children if greater depth\n return SelectorEngine.find(selector, this._config.parent).filter(element => !children.includes(element));\n }\n _addAriaAndCollapsedClass(triggerArray, isOpen) {\n if (!triggerArray.length) {\n return;\n }\n for (const element of triggerArray) {\n element.classList.toggle(CLASS_NAME_COLLAPSED, !isOpen);\n element.setAttribute('aria-expanded', isOpen);\n }\n }\n\n // Static\n static jQueryInterface(config) {\n const _config = {};\n if (typeof config === 'string' && /show|hide/.test(config)) {\n _config.toggle = false;\n }\n return this.each(function () {\n const data = Collapse.getOrCreateInstance(this, _config);\n if (typeof config === 'string') {\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n }\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$4, SELECTOR_DATA_TOGGLE$4, function (event) {\n // preventDefault only for elements (which change the URL) not inside the collapsible element\n if (event.target.tagName === 'A' || event.delegateTarget && event.delegateTarget.tagName === 'A') {\n event.preventDefault();\n }\n for (const element of SelectorEngine.getMultipleElementsFromSelector(this)) {\n Collapse.getOrCreateInstance(element, {\n toggle: false\n }).toggle();\n }\n});\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Collapse);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap dropdown.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$a = 'dropdown';\nconst DATA_KEY$6 = 'bs.dropdown';\nconst EVENT_KEY$6 = `.${DATA_KEY$6}`;\nconst DATA_API_KEY$3 = '.data-api';\nconst ESCAPE_KEY$2 = 'Escape';\nconst TAB_KEY$1 = 'Tab';\nconst ARROW_UP_KEY$1 = 'ArrowUp';\nconst ARROW_DOWN_KEY$1 = 'ArrowDown';\nconst RIGHT_MOUSE_BUTTON = 2; // MouseEvent.button value for the secondary button, usually the right button\n\nconst EVENT_HIDE$5 = `hide${EVENT_KEY$6}`;\nconst EVENT_HIDDEN$5 = `hidden${EVENT_KEY$6}`;\nconst EVENT_SHOW$5 = `show${EVENT_KEY$6}`;\nconst EVENT_SHOWN$5 = `shown${EVENT_KEY$6}`;\nconst EVENT_CLICK_DATA_API$3 = `click${EVENT_KEY$6}${DATA_API_KEY$3}`;\nconst EVENT_KEYDOWN_DATA_API = `keydown${EVENT_KEY$6}${DATA_API_KEY$3}`;\nconst EVENT_KEYUP_DATA_API = `keyup${EVENT_KEY$6}${DATA_API_KEY$3}`;\nconst CLASS_NAME_SHOW$6 = 'show';\nconst CLASS_NAME_DROPUP = 'dropup';\nconst CLASS_NAME_DROPEND = 'dropend';\nconst CLASS_NAME_DROPSTART = 'dropstart';\nconst CLASS_NAME_DROPUP_CENTER = 'dropup-center';\nconst CLASS_NAME_DROPDOWN_CENTER = 'dropdown-center';\nconst SELECTOR_DATA_TOGGLE$3 = '[data-bs-toggle=\"dropdown\"]:not(.disabled):not(:disabled)';\nconst SELECTOR_DATA_TOGGLE_SHOWN = `${SELECTOR_DATA_TOGGLE$3}.${CLASS_NAME_SHOW$6}`;\nconst SELECTOR_MENU = '.dropdown-menu';\nconst SELECTOR_NAVBAR = '.navbar';\nconst SELECTOR_NAVBAR_NAV = 
'.navbar-nav';\nconst SELECTOR_VISIBLE_ITEMS = '.dropdown-menu .dropdown-item:not(.disabled):not(:disabled)';\nconst PLACEMENT_TOP = isRTL() ? 'top-end' : 'top-start';\nconst PLACEMENT_TOPEND = isRTL() ? 'top-start' : 'top-end';\nconst PLACEMENT_BOTTOM = isRTL() ? 'bottom-end' : 'bottom-start';\nconst PLACEMENT_BOTTOMEND = isRTL() ? 'bottom-start' : 'bottom-end';\nconst PLACEMENT_RIGHT = isRTL() ? 'left-start' : 'right-start';\nconst PLACEMENT_LEFT = isRTL() ? 'right-start' : 'left-start';\nconst PLACEMENT_TOPCENTER = 'top';\nconst PLACEMENT_BOTTOMCENTER = 'bottom';\nconst Default$9 = {\n autoClose: true,\n boundary: 'clippingParents',\n display: 'dynamic',\n offset: [0, 2],\n popperConfig: null,\n reference: 'toggle'\n};\nconst DefaultType$9 = {\n autoClose: '(boolean|string)',\n boundary: '(string|element)',\n display: 'string',\n offset: '(array|string|function)',\n popperConfig: '(null|object|function)',\n reference: '(string|element|object)'\n};\n\n/**\n * Class definition\n */\n\nclass Dropdown extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._popper = null;\n this._parent = this._element.parentNode; // dropdown wrapper\n // TODO: v6 revert #37011 & change markup https://getbootstrap.com/docs/5.3/forms/input-group/\n this._menu = SelectorEngine.next(this._element, SELECTOR_MENU)[0] || SelectorEngine.prev(this._element, SELECTOR_MENU)[0] || SelectorEngine.findOne(SELECTOR_MENU, this._parent);\n this._inNavbar = this._detectNavbar();\n }\n\n // Getters\n static get Default() {\n return Default$9;\n }\n static get DefaultType() {\n return DefaultType$9;\n }\n static get NAME() {\n return NAME$a;\n }\n\n // Public\n toggle() {\n return this._isShown() ? this.hide() : this.show();\n }\n show() {\n if (isDisabled(this._element) || this._isShown()) {\n return;\n }\n const relatedTarget = {\n relatedTarget: this._element\n };\n const showEvent = EventHandler.trigger(this._element, EVENT_SHOW$5, relatedTarget);\n if (showEvent.defaultPrevented) {\n return;\n }\n this._createPopper();\n\n // If this is a touch-enabled device we add extra\n // empty mouseover listeners to the body's immediate children;\n // only needed because of broken event delegation on iOS\n // https://www.quirksmode.org/blog/archives/2014/02/mouse_event_bub.html\n if ('ontouchstart' in document.documentElement && !this._parent.closest(SELECTOR_NAVBAR_NAV)) {\n for (const element of [].concat(...document.body.children)) {\n EventHandler.on(element, 'mouseover', noop);\n }\n }\n this._element.focus();\n this._element.setAttribute('aria-expanded', true);\n this._menu.classList.add(CLASS_NAME_SHOW$6);\n this._element.classList.add(CLASS_NAME_SHOW$6);\n EventHandler.trigger(this._element, EVENT_SHOWN$5, relatedTarget);\n }\n hide() {\n if (isDisabled(this._element) || !this._isShown()) {\n return;\n }\n const relatedTarget = {\n relatedTarget: this._element\n };\n this._completeHide(relatedTarget);\n }\n dispose() {\n if (this._popper) {\n this._popper.destroy();\n }\n super.dispose();\n }\n update() {\n this._inNavbar = this._detectNavbar();\n if (this._popper) {\n this._popper.update();\n }\n }\n\n // Private\n _completeHide(relatedTarget) {\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE$5, relatedTarget);\n if (hideEvent.defaultPrevented) {\n return;\n }\n\n // If this is a touch-enabled device we remove the extra\n // empty mouseover listeners we added for iOS support\n if ('ontouchstart' in document.documentElement) {\n for (const element of 
[].concat(...document.body.children)) {\n EventHandler.off(element, 'mouseover', noop);\n }\n }\n if (this._popper) {\n this._popper.destroy();\n }\n this._menu.classList.remove(CLASS_NAME_SHOW$6);\n this._element.classList.remove(CLASS_NAME_SHOW$6);\n this._element.setAttribute('aria-expanded', 'false');\n Manipulator.removeDataAttribute(this._menu, 'popper');\n EventHandler.trigger(this._element, EVENT_HIDDEN$5, relatedTarget);\n }\n _getConfig(config) {\n config = super._getConfig(config);\n if (typeof config.reference === 'object' && !isElement(config.reference) && typeof config.reference.getBoundingClientRect !== 'function') {\n // Popper virtual elements require a getBoundingClientRect method\n throw new TypeError(`${NAME$a.toUpperCase()}: Option \"reference\" provided type \"object\" without a required \"getBoundingClientRect\" method.`);\n }\n return config;\n }\n _createPopper() {\n if (typeof Popper === 'undefined') {\n throw new TypeError('Bootstrap\\'s dropdowns require Popper (https://popper.js.org)');\n }\n let referenceElement = this._element;\n if (this._config.reference === 'parent') {\n referenceElement = this._parent;\n } else if (isElement(this._config.reference)) {\n referenceElement = getElement(this._config.reference);\n } else if (typeof this._config.reference === 'object') {\n referenceElement = this._config.reference;\n }\n const popperConfig = this._getPopperConfig();\n this._popper = Popper.createPopper(referenceElement, this._menu, popperConfig);\n }\n _isShown() {\n return this._menu.classList.contains(CLASS_NAME_SHOW$6);\n }\n _getPlacement() {\n const parentDropdown = this._parent;\n if (parentDropdown.classList.contains(CLASS_NAME_DROPEND)) {\n return PLACEMENT_RIGHT;\n }\n if (parentDropdown.classList.contains(CLASS_NAME_DROPSTART)) {\n return PLACEMENT_LEFT;\n }\n if (parentDropdown.classList.contains(CLASS_NAME_DROPUP_CENTER)) {\n return PLACEMENT_TOPCENTER;\n }\n if (parentDropdown.classList.contains(CLASS_NAME_DROPDOWN_CENTER)) {\n return PLACEMENT_BOTTOMCENTER;\n }\n\n // We need to trim the value because custom properties can also include spaces\n const isEnd = getComputedStyle(this._menu).getPropertyValue('--bs-position').trim() === 'end';\n if (parentDropdown.classList.contains(CLASS_NAME_DROPUP)) {\n return isEnd ? PLACEMENT_TOPEND : PLACEMENT_TOP;\n }\n return isEnd ? 
PLACEMENT_BOTTOMEND : PLACEMENT_BOTTOM;\n }\n _detectNavbar() {\n return this._element.closest(SELECTOR_NAVBAR) !== null;\n }\n _getOffset() {\n const {\n offset\n } = this._config;\n if (typeof offset === 'string') {\n return offset.split(',').map(value => Number.parseInt(value, 10));\n }\n if (typeof offset === 'function') {\n return popperData => offset(popperData, this._element);\n }\n return offset;\n }\n _getPopperConfig() {\n const defaultBsPopperConfig = {\n placement: this._getPlacement(),\n modifiers: [{\n name: 'preventOverflow',\n options: {\n boundary: this._config.boundary\n }\n }, {\n name: 'offset',\n options: {\n offset: this._getOffset()\n }\n }]\n };\n\n // Disable Popper if we have a static display or Dropdown is in Navbar\n if (this._inNavbar || this._config.display === 'static') {\n Manipulator.setDataAttribute(this._menu, 'popper', 'static'); // TODO: v6 remove\n defaultBsPopperConfig.modifiers = [{\n name: 'applyStyles',\n enabled: false\n }];\n }\n return {\n ...defaultBsPopperConfig,\n ...execute(this._config.popperConfig, [defaultBsPopperConfig])\n };\n }\n _selectMenuItem({\n key,\n target\n }) {\n const items = SelectorEngine.find(SELECTOR_VISIBLE_ITEMS, this._menu).filter(element => isVisible(element));\n if (!items.length) {\n return;\n }\n\n // if target isn't included in items (e.g. when expanding the dropdown)\n // allow cycling to get the last item in case key equals ARROW_UP_KEY\n getNextActiveElement(items, target, key === ARROW_DOWN_KEY$1, !items.includes(target)).focus();\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Dropdown.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n });\n }\n static clearMenus(event) {\n if (event.button === RIGHT_MOUSE_BUTTON || event.type === 'keyup' && event.key !== TAB_KEY$1) {\n return;\n }\n const openToggles = SelectorEngine.find(SELECTOR_DATA_TOGGLE_SHOWN);\n for (const toggle of openToggles) {\n const context = Dropdown.getInstance(toggle);\n if (!context || context._config.autoClose === false) {\n continue;\n }\n const composedPath = event.composedPath();\n const isMenuTarget = composedPath.includes(context._menu);\n if (composedPath.includes(context._element) || context._config.autoClose === 'inside' && !isMenuTarget || context._config.autoClose === 'outside' && isMenuTarget) {\n continue;\n }\n\n // Tab navigation through the dropdown menu or events from contained inputs shouldn't close the menu\n if (context._menu.contains(event.target) && (event.type === 'keyup' && event.key === TAB_KEY$1 || /input|select|option|textarea|form/i.test(event.target.tagName))) {\n continue;\n }\n const relatedTarget = {\n relatedTarget: context._element\n };\n if (event.type === 'click') {\n relatedTarget.clickEvent = event;\n }\n context._completeHide(relatedTarget);\n }\n }\n static dataApiKeydownHandler(event) {\n // If not an UP | DOWN | ESCAPE key => not a dropdown command\n // If input/textarea && if key is other than ESCAPE => not a dropdown command\n\n const isInput = /input|textarea/i.test(event.target.tagName);\n const isEscapeEvent = event.key === ESCAPE_KEY$2;\n const isUpOrDownEvent = [ARROW_UP_KEY$1, ARROW_DOWN_KEY$1].includes(event.key);\n if (!isUpOrDownEvent && !isEscapeEvent) {\n return;\n }\n if (isInput && !isEscapeEvent) {\n return;\n }\n event.preventDefault();\n\n // TODO: v6 revert #37011 & change 
markup https://getbootstrap.com/docs/5.3/forms/input-group/\n const getToggleButton = this.matches(SELECTOR_DATA_TOGGLE$3) ? this : SelectorEngine.prev(this, SELECTOR_DATA_TOGGLE$3)[0] || SelectorEngine.next(this, SELECTOR_DATA_TOGGLE$3)[0] || SelectorEngine.findOne(SELECTOR_DATA_TOGGLE$3, event.delegateTarget.parentNode);\n const instance = Dropdown.getOrCreateInstance(getToggleButton);\n if (isUpOrDownEvent) {\n event.stopPropagation();\n instance.show();\n instance._selectMenuItem(event);\n return;\n }\n if (instance._isShown()) {\n // else is escape and we check if it is shown\n event.stopPropagation();\n instance.hide();\n getToggleButton.focus();\n }\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_KEYDOWN_DATA_API, SELECTOR_DATA_TOGGLE$3, Dropdown.dataApiKeydownHandler);\nEventHandler.on(document, EVENT_KEYDOWN_DATA_API, SELECTOR_MENU, Dropdown.dataApiKeydownHandler);\nEventHandler.on(document, EVENT_CLICK_DATA_API$3, Dropdown.clearMenus);\nEventHandler.on(document, EVENT_KEYUP_DATA_API, Dropdown.clearMenus);\nEventHandler.on(document, EVENT_CLICK_DATA_API$3, SELECTOR_DATA_TOGGLE$3, function (event) {\n event.preventDefault();\n Dropdown.getOrCreateInstance(this).toggle();\n});\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Dropdown);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/backdrop.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$9 = 'backdrop';\nconst CLASS_NAME_FADE$4 = 'fade';\nconst CLASS_NAME_SHOW$5 = 'show';\nconst EVENT_MOUSEDOWN = `mousedown.bs.${NAME$9}`;\nconst Default$8 = {\n className: 'modal-backdrop',\n clickCallback: null,\n isAnimated: false,\n isVisible: true,\n // if false, we use the backdrop helper without adding any element to the dom\n rootElement: 'body' // give the choice to place backdrop under different elements\n};\n\nconst DefaultType$8 = {\n className: 'string',\n clickCallback: '(function|null)',\n isAnimated: 'boolean',\n isVisible: 'boolean',\n rootElement: '(element|string)'\n};\n\n/**\n * Class definition\n */\n\nclass Backdrop extends Config {\n constructor(config) {\n super();\n this._config = this._getConfig(config);\n this._isAppended = false;\n this._element = null;\n }\n\n // Getters\n static get Default() {\n return Default$8;\n }\n static get DefaultType() {\n return DefaultType$8;\n }\n static get NAME() {\n return NAME$9;\n }\n\n // Public\n show(callback) {\n if (!this._config.isVisible) {\n execute(callback);\n return;\n }\n this._append();\n const element = this._getElement();\n if (this._config.isAnimated) {\n reflow(element);\n }\n element.classList.add(CLASS_NAME_SHOW$5);\n this._emulateAnimation(() => {\n execute(callback);\n });\n }\n hide(callback) {\n if (!this._config.isVisible) {\n execute(callback);\n return;\n }\n this._getElement().classList.remove(CLASS_NAME_SHOW$5);\n this._emulateAnimation(() => {\n this.dispose();\n execute(callback);\n });\n }\n dispose() {\n if (!this._isAppended) {\n return;\n }\n EventHandler.off(this._element, EVENT_MOUSEDOWN);\n this._element.remove();\n this._isAppended = false;\n }\n\n // Private\n _getElement() {\n if (!this._element) {\n const backdrop = document.createElement('div');\n backdrop.className = this._config.className;\n if (this._config.isAnimated) {\n backdrop.classList.add(CLASS_NAME_FADE$4);\n }\n this._element = backdrop;\n }\n 
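// Note (not in the upstream bundle): the backdrop node is created lazily on\n // first access and cached on the instance; _append() later attaches it to\n // config.rootElement and wires the mousedown clickCallback.\n 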
return this._element;\n }\n _configAfterMerge(config) {\n // use getElement() with the default \"body\" to get a fresh Element on each instantiation\n config.rootElement = getElement(config.rootElement);\n return config;\n }\n _append() {\n if (this._isAppended) {\n return;\n }\n const element = this._getElement();\n this._config.rootElement.append(element);\n EventHandler.on(element, EVENT_MOUSEDOWN, () => {\n execute(this._config.clickCallback);\n });\n this._isAppended = true;\n }\n _emulateAnimation(callback) {\n executeAfterTransition(callback, this._getElement(), this._config.isAnimated);\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/focustrap.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$8 = 'focustrap';\nconst DATA_KEY$5 = 'bs.focustrap';\nconst EVENT_KEY$5 = `.${DATA_KEY$5}`;\nconst EVENT_FOCUSIN$2 = `focusin${EVENT_KEY$5}`;\nconst EVENT_KEYDOWN_TAB = `keydown.tab${EVENT_KEY$5}`;\nconst TAB_KEY = 'Tab';\nconst TAB_NAV_FORWARD = 'forward';\nconst TAB_NAV_BACKWARD = 'backward';\nconst Default$7 = {\n autofocus: true,\n trapElement: null // The element to trap focus inside of\n};\n\nconst DefaultType$7 = {\n autofocus: 'boolean',\n trapElement: 'element'\n};\n\n/**\n * Class definition\n */\n\nclass FocusTrap extends Config {\n constructor(config) {\n super();\n this._config = this._getConfig(config);\n this._isActive = false;\n this._lastTabNavDirection = null;\n }\n\n // Getters\n static get Default() {\n return Default$7;\n }\n static get DefaultType() {\n return DefaultType$7;\n }\n static get NAME() {\n return NAME$8;\n }\n\n // Public\n activate() {\n if (this._isActive) {\n return;\n }\n if (this._config.autofocus) {\n this._config.trapElement.focus();\n }\n EventHandler.off(document, EVENT_KEY$5); // guard against infinite focus loop\n EventHandler.on(document, EVENT_FOCUSIN$2, event => this._handleFocusin(event));\n EventHandler.on(document, EVENT_KEYDOWN_TAB, event => this._handleKeydown(event));\n this._isActive = true;\n }\n deactivate() {\n if (!this._isActive) {\n return;\n }\n this._isActive = false;\n EventHandler.off(document, EVENT_KEY$5);\n }\n\n // Private\n _handleFocusin(event) {\n const {\n trapElement\n } = this._config;\n if (event.target === document || event.target === trapElement || trapElement.contains(event.target)) {\n return;\n }\n const elements = SelectorEngine.focusableChildren(trapElement);\n if (elements.length === 0) {\n trapElement.focus();\n } else if (this._lastTabNavDirection === TAB_NAV_BACKWARD) {\n elements[elements.length - 1].focus();\n } else {\n elements[0].focus();\n }\n }\n _handleKeydown(event) {\n if (event.key !== TAB_KEY) {\n return;\n }\n this._lastTabNavDirection = event.shiftKey ? 
TAB_NAV_BACKWARD : TAB_NAV_FORWARD;\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/scrollBar.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst SELECTOR_FIXED_CONTENT = '.fixed-top, .fixed-bottom, .is-fixed, .sticky-top';\nconst SELECTOR_STICKY_CONTENT = '.sticky-top';\nconst PROPERTY_PADDING = 'padding-right';\nconst PROPERTY_MARGIN = 'margin-right';\n\n/**\n * Class definition\n */\n\nclass ScrollBarHelper {\n constructor() {\n this._element = document.body;\n }\n\n // Public\n getWidth() {\n // https://developer.mozilla.org/en-US/docs/Web/API/Window/innerWidth#usage_notes\n const documentWidth = document.documentElement.clientWidth;\n return Math.abs(window.innerWidth - documentWidth);\n }\n hide() {\n const width = this.getWidth();\n this._disableOverFlow();\n // give padding to element to balance the hidden scrollbar width\n this._setElementAttributes(this._element, PROPERTY_PADDING, calculatedValue => calculatedValue + width);\n // trick: We adjust positive paddingRight and negative marginRight to sticky-top elements to keep showing fullwidth\n this._setElementAttributes(SELECTOR_FIXED_CONTENT, PROPERTY_PADDING, calculatedValue => calculatedValue + width);\n this._setElementAttributes(SELECTOR_STICKY_CONTENT, PROPERTY_MARGIN, calculatedValue => calculatedValue - width);\n }\n reset() {\n this._resetElementAttributes(this._element, 'overflow');\n this._resetElementAttributes(this._element, PROPERTY_PADDING);\n this._resetElementAttributes(SELECTOR_FIXED_CONTENT, PROPERTY_PADDING);\n this._resetElementAttributes(SELECTOR_STICKY_CONTENT, PROPERTY_MARGIN);\n }\n isOverflowing() {\n return this.getWidth() > 0;\n }\n\n // Private\n _disableOverFlow() {\n this._saveInitialAttribute(this._element, 'overflow');\n this._element.style.overflow = 'hidden';\n }\n _setElementAttributes(selector, styleProperty, callback) {\n const scrollbarWidth = this.getWidth();\n const manipulationCallBack = element => {\n if (element !== this._element && window.innerWidth > element.clientWidth + scrollbarWidth) {\n return;\n }\n this._saveInitialAttribute(element, styleProperty);\n const calculatedValue = window.getComputedStyle(element).getPropertyValue(styleProperty);\n element.style.setProperty(styleProperty, `${callback(Number.parseFloat(calculatedValue))}px`);\n };\n this._applyManipulationCallback(selector, manipulationCallBack);\n }\n _saveInitialAttribute(element, styleProperty) {\n const actualValue = element.style.getPropertyValue(styleProperty);\n if (actualValue) {\n Manipulator.setDataAttribute(element, styleProperty, actualValue);\n }\n }\n _resetElementAttributes(selector, styleProperty) {\n const manipulationCallBack = element => {\n const value = Manipulator.getDataAttribute(element, styleProperty);\n // We only want to remove the property if the value is `null`; the value can also be zero\n if (value === null) {\n element.style.removeProperty(styleProperty);\n return;\n }\n Manipulator.removeDataAttribute(element, styleProperty);\n element.style.setProperty(styleProperty, value);\n };\n this._applyManipulationCallback(selector, manipulationCallBack);\n }\n _applyManipulationCallback(selector, callBack) {\n if (isElement(selector)) {\n callBack(selector);\n return;\n }\n for (const sel of SelectorEngine.find(selector, this._element)) {\n callBack(sel);\n }\n }\n}\n\n/**\n * 
--------------------------------------------------------------------------\n * Bootstrap modal.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$7 = 'modal';\nconst DATA_KEY$4 = 'bs.modal';\nconst EVENT_KEY$4 = `.${DATA_KEY$4}`;\nconst DATA_API_KEY$2 = '.data-api';\nconst ESCAPE_KEY$1 = 'Escape';\nconst EVENT_HIDE$4 = `hide${EVENT_KEY$4}`;\nconst EVENT_HIDE_PREVENTED$1 = `hidePrevented${EVENT_KEY$4}`;\nconst EVENT_HIDDEN$4 = `hidden${EVENT_KEY$4}`;\nconst EVENT_SHOW$4 = `show${EVENT_KEY$4}`;\nconst EVENT_SHOWN$4 = `shown${EVENT_KEY$4}`;\nconst EVENT_RESIZE$1 = `resize${EVENT_KEY$4}`;\nconst EVENT_CLICK_DISMISS = `click.dismiss${EVENT_KEY$4}`;\nconst EVENT_MOUSEDOWN_DISMISS = `mousedown.dismiss${EVENT_KEY$4}`;\nconst EVENT_KEYDOWN_DISMISS$1 = `keydown.dismiss${EVENT_KEY$4}`;\nconst EVENT_CLICK_DATA_API$2 = `click${EVENT_KEY$4}${DATA_API_KEY$2}`;\nconst CLASS_NAME_OPEN = 'modal-open';\nconst CLASS_NAME_FADE$3 = 'fade';\nconst CLASS_NAME_SHOW$4 = 'show';\nconst CLASS_NAME_STATIC = 'modal-static';\nconst OPEN_SELECTOR$1 = '.modal.show';\nconst SELECTOR_DIALOG = '.modal-dialog';\nconst SELECTOR_MODAL_BODY = '.modal-body';\nconst SELECTOR_DATA_TOGGLE$2 = '[data-bs-toggle=\"modal\"]';\nconst Default$6 = {\n backdrop: true,\n focus: true,\n keyboard: true\n};\nconst DefaultType$6 = {\n backdrop: '(boolean|string)',\n focus: 'boolean',\n keyboard: 'boolean'\n};\n\n/**\n * Class definition\n */\n\nclass Modal extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._dialog = SelectorEngine.findOne(SELECTOR_DIALOG, this._element);\n this._backdrop = this._initializeBackDrop();\n this._focustrap = this._initializeFocusTrap();\n this._isShown = false;\n this._isTransitioning = false;\n this._scrollBar = new ScrollBarHelper();\n this._addEventListeners();\n }\n\n // Getters\n static get Default() {\n return Default$6;\n }\n static get DefaultType() {\n return DefaultType$6;\n }\n static get NAME() {\n return NAME$7;\n }\n\n // Public\n toggle(relatedTarget) {\n return this._isShown ? 
this.hide() : this.show(relatedTarget);\n }\n show(relatedTarget) {\n if (this._isShown || this._isTransitioning) {\n return;\n }\n const showEvent = EventHandler.trigger(this._element, EVENT_SHOW$4, {\n relatedTarget\n });\n if (showEvent.defaultPrevented) {\n return;\n }\n this._isShown = true;\n this._isTransitioning = true;\n this._scrollBar.hide();\n document.body.classList.add(CLASS_NAME_OPEN);\n this._adjustDialog();\n this._backdrop.show(() => this._showElement(relatedTarget));\n }\n hide() {\n if (!this._isShown || this._isTransitioning) {\n return;\n }\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE$4);\n if (hideEvent.defaultPrevented) {\n return;\n }\n this._isShown = false;\n this._isTransitioning = true;\n this._focustrap.deactivate();\n this._element.classList.remove(CLASS_NAME_SHOW$4);\n this._queueCallback(() => this._hideModal(), this._element, this._isAnimated());\n }\n dispose() {\n EventHandler.off(window, EVENT_KEY$4);\n EventHandler.off(this._dialog, EVENT_KEY$4);\n this._backdrop.dispose();\n this._focustrap.deactivate();\n super.dispose();\n }\n handleUpdate() {\n this._adjustDialog();\n }\n\n // Private\n _initializeBackDrop() {\n return new Backdrop({\n isVisible: Boolean(this._config.backdrop),\n // 'static' option will be translated to true, and booleans will keep their value,\n isAnimated: this._isAnimated()\n });\n }\n _initializeFocusTrap() {\n return new FocusTrap({\n trapElement: this._element\n });\n }\n _showElement(relatedTarget) {\n // try to append dynamic modal\n if (!document.body.contains(this._element)) {\n document.body.append(this._element);\n }\n this._element.style.display = 'block';\n this._element.removeAttribute('aria-hidden');\n this._element.setAttribute('aria-modal', true);\n this._element.setAttribute('role', 'dialog');\n this._element.scrollTop = 0;\n const modalBody = SelectorEngine.findOne(SELECTOR_MODAL_BODY, this._dialog);\n if (modalBody) {\n modalBody.scrollTop = 0;\n }\n reflow(this._element);\n this._element.classList.add(CLASS_NAME_SHOW$4);\n const transitionComplete = () => {\n if (this._config.focus) {\n this._focustrap.activate();\n }\n this._isTransitioning = false;\n EventHandler.trigger(this._element, EVENT_SHOWN$4, {\n relatedTarget\n });\n };\n this._queueCallback(transitionComplete, this._dialog, this._isAnimated());\n }\n _addEventListeners() {\n EventHandler.on(this._element, EVENT_KEYDOWN_DISMISS$1, event => {\n if (event.key !== ESCAPE_KEY$1) {\n return;\n }\n if (this._config.keyboard) {\n this.hide();\n return;\n }\n this._triggerBackdropTransition();\n });\n EventHandler.on(window, EVENT_RESIZE$1, () => {\n if (this._isShown && !this._isTransitioning) {\n this._adjustDialog();\n }\n });\n EventHandler.on(this._element, EVENT_MOUSEDOWN_DISMISS, event => {\n // a bad trick to segregate clicks that may start inside dialog but end outside, and avoid listen to scrollbar clicks\n EventHandler.one(this._element, EVENT_CLICK_DISMISS, event2 => {\n if (this._element !== event.target || this._element !== event2.target) {\n return;\n }\n if (this._config.backdrop === 'static') {\n this._triggerBackdropTransition();\n return;\n }\n if (this._config.backdrop) {\n this.hide();\n }\n });\n });\n }\n _hideModal() {\n this._element.style.display = 'none';\n this._element.setAttribute('aria-hidden', true);\n this._element.removeAttribute('aria-modal');\n this._element.removeAttribute('role');\n this._isTransitioning = false;\n this._backdrop.hide(() => {\n document.body.classList.remove(CLASS_NAME_OPEN);\n 
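// Note (not in the upstream bundle): this callback runs only after the\n // backdrop has finished hiding, so scrollbar padding and dialog adjustments\n // are restored without a visible mid-transition reflow.\n 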
this._resetAdjustments();\n this._scrollBar.reset();\n EventHandler.trigger(this._element, EVENT_HIDDEN$4);\n });\n }\n _isAnimated() {\n return this._element.classList.contains(CLASS_NAME_FADE$3);\n }\n _triggerBackdropTransition() {\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE_PREVENTED$1);\n if (hideEvent.defaultPrevented) {\n return;\n }\n const isModalOverflowing = this._element.scrollHeight > document.documentElement.clientHeight;\n const initialOverflowY = this._element.style.overflowY;\n // return if the following background transition hasn't yet completed\n if (initialOverflowY === 'hidden' || this._element.classList.contains(CLASS_NAME_STATIC)) {\n return;\n }\n if (!isModalOverflowing) {\n this._element.style.overflowY = 'hidden';\n }\n this._element.classList.add(CLASS_NAME_STATIC);\n this._queueCallback(() => {\n this._element.classList.remove(CLASS_NAME_STATIC);\n this._queueCallback(() => {\n this._element.style.overflowY = initialOverflowY;\n }, this._dialog);\n }, this._dialog);\n this._element.focus();\n }\n\n /**\n * The following methods are used to handle overflowing modals\n */\n\n _adjustDialog() {\n const isModalOverflowing = this._element.scrollHeight > document.documentElement.clientHeight;\n const scrollbarWidth = this._scrollBar.getWidth();\n const isBodyOverflowing = scrollbarWidth > 0;\n if (isBodyOverflowing && !isModalOverflowing) {\n const property = isRTL() ? 'paddingLeft' : 'paddingRight';\n this._element.style[property] = `${scrollbarWidth}px`;\n }\n if (!isBodyOverflowing && isModalOverflowing) {\n const property = isRTL() ? 'paddingRight' : 'paddingLeft';\n this._element.style[property] = `${scrollbarWidth}px`;\n }\n }\n _resetAdjustments() {\n this._element.style.paddingLeft = '';\n this._element.style.paddingRight = '';\n }\n\n // Static\n static jQueryInterface(config, relatedTarget) {\n return this.each(function () {\n const data = Modal.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config](relatedTarget);\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$2, SELECTOR_DATA_TOGGLE$2, function (event) {\n const target = SelectorEngine.getElementFromSelector(this);\n if (['A', 'AREA'].includes(this.tagName)) {\n event.preventDefault();\n }\n EventHandler.one(target, EVENT_SHOW$4, showEvent => {\n if (showEvent.defaultPrevented) {\n // only register focus restorer if modal will actually get shown\n return;\n }\n EventHandler.one(target, EVENT_HIDDEN$4, () => {\n if (isVisible(this)) {\n this.focus();\n }\n });\n });\n\n // avoid conflict when clicking modal toggler while another one is open\n const alreadyOpen = SelectorEngine.findOne(OPEN_SELECTOR$1);\n if (alreadyOpen) {\n Modal.getInstance(alreadyOpen).hide();\n }\n const data = Modal.getOrCreateInstance(target);\n data.toggle(this);\n});\nenableDismissTrigger(Modal);\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Modal);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap offcanvas.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$6 = 'offcanvas';\nconst DATA_KEY$3 = 'bs.offcanvas';\nconst EVENT_KEY$3 = `.${DATA_KEY$3}`;\nconst DATA_API_KEY$1 = '.data-api';\nconst 
EVENT_LOAD_DATA_API$2 = `load${EVENT_KEY$3}${DATA_API_KEY$1}`;\nconst ESCAPE_KEY = 'Escape';\nconst CLASS_NAME_SHOW$3 = 'show';\nconst CLASS_NAME_SHOWING$1 = 'showing';\nconst CLASS_NAME_HIDING = 'hiding';\nconst CLASS_NAME_BACKDROP = 'offcanvas-backdrop';\nconst OPEN_SELECTOR = '.offcanvas.show';\nconst EVENT_SHOW$3 = `show${EVENT_KEY$3}`;\nconst EVENT_SHOWN$3 = `shown${EVENT_KEY$3}`;\nconst EVENT_HIDE$3 = `hide${EVENT_KEY$3}`;\nconst EVENT_HIDE_PREVENTED = `hidePrevented${EVENT_KEY$3}`;\nconst EVENT_HIDDEN$3 = `hidden${EVENT_KEY$3}`;\nconst EVENT_RESIZE = `resize${EVENT_KEY$3}`;\nconst EVENT_CLICK_DATA_API$1 = `click${EVENT_KEY$3}${DATA_API_KEY$1}`;\nconst EVENT_KEYDOWN_DISMISS = `keydown.dismiss${EVENT_KEY$3}`;\nconst SELECTOR_DATA_TOGGLE$1 = '[data-bs-toggle=\"offcanvas\"]';\nconst Default$5 = {\n backdrop: true,\n keyboard: true,\n scroll: false\n};\nconst DefaultType$5 = {\n backdrop: '(boolean|string)',\n keyboard: 'boolean',\n scroll: 'boolean'\n};\n\n/**\n * Class definition\n */\n\nclass Offcanvas extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n this._isShown = false;\n this._backdrop = this._initializeBackDrop();\n this._focustrap = this._initializeFocusTrap();\n this._addEventListeners();\n }\n\n // Getters\n static get Default() {\n return Default$5;\n }\n static get DefaultType() {\n return DefaultType$5;\n }\n static get NAME() {\n return NAME$6;\n }\n\n // Public\n toggle(relatedTarget) {\n return this._isShown ? this.hide() : this.show(relatedTarget);\n }\n show(relatedTarget) {\n if (this._isShown) {\n return;\n }\n const showEvent = EventHandler.trigger(this._element, EVENT_SHOW$3, {\n relatedTarget\n });\n if (showEvent.defaultPrevented) {\n return;\n }\n this._isShown = true;\n this._backdrop.show();\n if (!this._config.scroll) {\n new ScrollBarHelper().hide();\n }\n this._element.setAttribute('aria-modal', true);\n this._element.setAttribute('role', 'dialog');\n this._element.classList.add(CLASS_NAME_SHOWING$1);\n const completeCallBack = () => {\n if (!this._config.scroll || this._config.backdrop) {\n this._focustrap.activate();\n }\n this._element.classList.add(CLASS_NAME_SHOW$3);\n this._element.classList.remove(CLASS_NAME_SHOWING$1);\n EventHandler.trigger(this._element, EVENT_SHOWN$3, {\n relatedTarget\n });\n };\n this._queueCallback(completeCallBack, this._element, true);\n }\n hide() {\n if (!this._isShown) {\n return;\n }\n const hideEvent = EventHandler.trigger(this._element, EVENT_HIDE$3);\n if (hideEvent.defaultPrevented) {\n return;\n }\n this._focustrap.deactivate();\n this._element.blur();\n this._isShown = false;\n this._element.classList.add(CLASS_NAME_HIDING);\n this._backdrop.hide();\n const completeCallback = () => {\n this._element.classList.remove(CLASS_NAME_SHOW$3, CLASS_NAME_HIDING);\n this._element.removeAttribute('aria-modal');\n this._element.removeAttribute('role');\n if (!this._config.scroll) {\n new ScrollBarHelper().reset();\n }\n EventHandler.trigger(this._element, EVENT_HIDDEN$3);\n };\n this._queueCallback(completeCallback, this._element, true);\n }\n dispose() {\n this._backdrop.dispose();\n this._focustrap.deactivate();\n super.dispose();\n }\n\n // Private\n _initializeBackDrop() {\n const clickCallback = () => {\n if (this._config.backdrop === 'static') {\n EventHandler.trigger(this._element, EVENT_HIDE_PREVENTED);\n return;\n }\n this.hide();\n };\n\n // 'static' option will be translated to true, and booleans will keep their value\n const isVisible = Boolean(this._config.backdrop);\n return 
new Backdrop({\n className: CLASS_NAME_BACKDROP,\n isVisible,\n isAnimated: true,\n rootElement: this._element.parentNode,\n clickCallback: isVisible ? clickCallback : null\n });\n }\n _initializeFocusTrap() {\n return new FocusTrap({\n trapElement: this._element\n });\n }\n _addEventListeners() {\n EventHandler.on(this._element, EVENT_KEYDOWN_DISMISS, event => {\n if (event.key !== ESCAPE_KEY) {\n return;\n }\n if (this._config.keyboard) {\n this.hide();\n return;\n }\n EventHandler.trigger(this._element, EVENT_HIDE_PREVENTED);\n });\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Offcanvas.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (data[config] === undefined || config.startsWith('_') || config === 'constructor') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config](this);\n });\n }\n}\n\n/**\n * Data API implementation\n */\n\nEventHandler.on(document, EVENT_CLICK_DATA_API$1, SELECTOR_DATA_TOGGLE$1, function (event) {\n const target = SelectorEngine.getElementFromSelector(this);\n if (['A', 'AREA'].includes(this.tagName)) {\n event.preventDefault();\n }\n if (isDisabled(this)) {\n return;\n }\n EventHandler.one(target, EVENT_HIDDEN$3, () => {\n // focus on trigger when it is closed\n if (isVisible(this)) {\n this.focus();\n }\n });\n\n // avoid conflict when clicking a toggler of an offcanvas, while another is open\n const alreadyOpen = SelectorEngine.findOne(OPEN_SELECTOR);\n if (alreadyOpen && alreadyOpen !== target) {\n Offcanvas.getInstance(alreadyOpen).hide();\n }\n const data = Offcanvas.getOrCreateInstance(target);\n data.toggle(this);\n});\nEventHandler.on(window, EVENT_LOAD_DATA_API$2, () => {\n for (const selector of SelectorEngine.find(OPEN_SELECTOR)) {\n Offcanvas.getOrCreateInstance(selector).show();\n }\n});\nEventHandler.on(window, EVENT_RESIZE, () => {\n for (const element of SelectorEngine.find('[aria-modal][class*=show][class*=offcanvas-]')) {\n if (getComputedStyle(element).position !== 'fixed') {\n Offcanvas.getOrCreateInstance(element).hide();\n }\n }\n});\nenableDismissTrigger(Offcanvas);\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Offcanvas);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/sanitizer.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n// js-docs-start allow-list\nconst ARIA_ATTRIBUTE_PATTERN = /^aria-[\\w-]*$/i;\nconst DefaultAllowlist = {\n // Global attributes allowed on any supplied element below.\n '*': ['class', 'dir', 'id', 'lang', 'role', ARIA_ATTRIBUTE_PATTERN],\n a: ['target', 'href', 'title', 'rel'],\n area: [],\n b: [],\n br: [],\n col: [],\n code: [],\n div: [],\n em: [],\n hr: [],\n h1: [],\n h2: [],\n h3: [],\n h4: [],\n h5: [],\n h6: [],\n i: [],\n img: ['src', 'srcset', 'alt', 'title', 'width', 'height'],\n li: [],\n ol: [],\n p: [],\n pre: [],\n s: [],\n small: [],\n span: [],\n sub: [],\n sup: [],\n strong: [],\n u: [],\n ul: []\n};\n// js-docs-end allow-list\n\nconst uriAttributes = new Set(['background', 'cite', 'href', 'itemtype', 'longdesc', 'poster', 'src', 'xlink:href']);\n\n/**\n * A pattern that recognizes URLs that are safe wrt. 
XSS in URL navigation\n * contexts.\n *\n * Shout-out to Angular https://github.com/angular/angular/blob/15.2.8/packages/core/src/sanitization/url_sanitizer.ts#L38\n */\n// eslint-disable-next-line unicorn/better-regex\nconst SAFE_URL_PATTERN = /^(?!javascript:)(?:[a-z0-9+.-]+:|[^&:/?#]*(?:[/?#]|$))/i;\nconst allowedAttribute = (attribute, allowedAttributeList) => {\n const attributeName = attribute.nodeName.toLowerCase();\n if (allowedAttributeList.includes(attributeName)) {\n if (uriAttributes.has(attributeName)) {\n return Boolean(SAFE_URL_PATTERN.test(attribute.nodeValue));\n }\n return true;\n }\n\n // Check if a regular expression validates the attribute.\n return allowedAttributeList.filter(attributeRegex => attributeRegex instanceof RegExp).some(regex => regex.test(attributeName));\n};\nfunction sanitizeHtml(unsafeHtml, allowList, sanitizeFunction) {\n if (!unsafeHtml.length) {\n return unsafeHtml;\n }\n if (sanitizeFunction && typeof sanitizeFunction === 'function') {\n return sanitizeFunction(unsafeHtml);\n }\n const domParser = new window.DOMParser();\n const createdDocument = domParser.parseFromString(unsafeHtml, 'text/html');\n const elements = [].concat(...createdDocument.body.querySelectorAll('*'));\n for (const element of elements) {\n const elementName = element.nodeName.toLowerCase();\n if (!Object.keys(allowList).includes(elementName)) {\n element.remove();\n continue;\n }\n const attributeList = [].concat(...element.attributes);\n const allowedAttributes = [].concat(allowList['*'] || [], allowList[elementName] || []);\n for (const attribute of attributeList) {\n if (!allowedAttribute(attribute, allowedAttributes)) {\n element.removeAttribute(attribute.nodeName);\n }\n }\n }\n return createdDocument.body.innerHTML;\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap util/template-factory.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$5 = 'TemplateFactory';\nconst Default$4 = {\n allowList: DefaultAllowlist,\n content: {},\n // { selector : text , selector2 : text2 , }\n extraClass: '',\n html: false,\n sanitize: true,\n sanitizeFn: null,\n template: '
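<div></div>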
'\n};\nconst DefaultType$4 = {\n allowList: 'object',\n content: 'object',\n extraClass: '(string|function)',\n html: 'boolean',\n sanitize: 'boolean',\n sanitizeFn: '(null|function)',\n template: 'string'\n};\nconst DefaultContentType = {\n entry: '(string|element|function|null)',\n selector: '(string|element)'\n};\n\n/**\n * Class definition\n */\n\nclass TemplateFactory extends Config {\n constructor(config) {\n super();\n this._config = this._getConfig(config);\n }\n\n // Getters\n static get Default() {\n return Default$4;\n }\n static get DefaultType() {\n return DefaultType$4;\n }\n static get NAME() {\n return NAME$5;\n }\n\n // Public\n getContent() {\n return Object.values(this._config.content).map(config => this._resolvePossibleFunction(config)).filter(Boolean);\n }\n hasContent() {\n return this.getContent().length > 0;\n }\n changeContent(content) {\n this._checkContent(content);\n this._config.content = {\n ...this._config.content,\n ...content\n };\n return this;\n }\n toHtml() {\n const templateWrapper = document.createElement('div');\n templateWrapper.innerHTML = this._maybeSanitize(this._config.template);\n for (const [selector, text] of Object.entries(this._config.content)) {\n this._setContent(templateWrapper, text, selector);\n }\n const template = templateWrapper.children[0];\n const extraClass = this._resolvePossibleFunction(this._config.extraClass);\n if (extraClass) {\n template.classList.add(...extraClass.split(' '));\n }\n return template;\n }\n\n // Private\n _typeCheckConfig(config) {\n super._typeCheckConfig(config);\n this._checkContent(config.content);\n }\n _checkContent(arg) {\n for (const [selector, content] of Object.entries(arg)) {\n super._typeCheckConfig({\n selector,\n entry: content\n }, DefaultContentType);\n }\n }\n _setContent(template, content, selector) {\n const templateElement = SelectorEngine.findOne(selector, template);\n if (!templateElement) {\n return;\n }\n content = this._resolvePossibleFunction(content);\n if (!content) {\n templateElement.remove();\n return;\n }\n if (isElement(content)) {\n this._putElementInTemplate(getElement(content), templateElement);\n return;\n }\n if (this._config.html) {\n templateElement.innerHTML = this._maybeSanitize(content);\n return;\n }\n templateElement.textContent = content;\n }\n _maybeSanitize(arg) {\n return this._config.sanitize ? 
sanitizeHtml(arg, this._config.allowList, this._config.sanitizeFn) : arg;\n }\n _resolvePossibleFunction(arg) {\n return execute(arg, [this]);\n }\n _putElementInTemplate(element, templateElement) {\n if (this._config.html) {\n templateElement.innerHTML = '';\n templateElement.append(element);\n return;\n }\n templateElement.textContent = element.textContent;\n }\n}\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap tooltip.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$4 = 'tooltip';\nconst DISALLOWED_ATTRIBUTES = new Set(['sanitize', 'allowList', 'sanitizeFn']);\nconst CLASS_NAME_FADE$2 = 'fade';\nconst CLASS_NAME_MODAL = 'modal';\nconst CLASS_NAME_SHOW$2 = 'show';\nconst SELECTOR_TOOLTIP_INNER = '.tooltip-inner';\nconst SELECTOR_MODAL = `.${CLASS_NAME_MODAL}`;\nconst EVENT_MODAL_HIDE = 'hide.bs.modal';\nconst TRIGGER_HOVER = 'hover';\nconst TRIGGER_FOCUS = 'focus';\nconst TRIGGER_CLICK = 'click';\nconst TRIGGER_MANUAL = 'manual';\nconst EVENT_HIDE$2 = 'hide';\nconst EVENT_HIDDEN$2 = 'hidden';\nconst EVENT_SHOW$2 = 'show';\nconst EVENT_SHOWN$2 = 'shown';\nconst EVENT_INSERTED = 'inserted';\nconst EVENT_CLICK$1 = 'click';\nconst EVENT_FOCUSIN$1 = 'focusin';\nconst EVENT_FOCUSOUT$1 = 'focusout';\nconst EVENT_MOUSEENTER = 'mouseenter';\nconst EVENT_MOUSELEAVE = 'mouseleave';\nconst AttachmentMap = {\n AUTO: 'auto',\n TOP: 'top',\n RIGHT: isRTL() ? 'left' : 'right',\n BOTTOM: 'bottom',\n LEFT: isRTL() ? 'right' : 'left'\n};\nconst Default$3 = {\n allowList: DefaultAllowlist,\n animation: true,\n boundary: 'clippingParents',\n container: false,\n customClass: '',\n delay: 0,\n fallbackPlacements: ['top', 'right', 'bottom', 'left'],\n html: false,\n offset: [0, 6],\n placement: 'top',\n popperConfig: null,\n sanitize: true,\n sanitizeFn: null,\n selector: false,\n template: '
<div class=\"tooltip\" role=\"tooltip\">' + '<div class=\"tooltip-arrow\"></div>' + '<div class=\"tooltip-inner\"></div>' + '</div>
',\n title: '',\n trigger: 'hover focus'\n};\nconst DefaultType$3 = {\n allowList: 'object',\n animation: 'boolean',\n boundary: '(string|element)',\n container: '(string|element|boolean)',\n customClass: '(string|function)',\n delay: '(number|object)',\n fallbackPlacements: 'array',\n html: 'boolean',\n offset: '(array|string|function)',\n placement: '(string|function)',\n popperConfig: '(null|object|function)',\n sanitize: 'boolean',\n sanitizeFn: '(null|function)',\n selector: '(string|boolean)',\n template: 'string',\n title: '(string|element|function)',\n trigger: 'string'\n};\n\n/**\n * Class definition\n */\n\nclass Tooltip extends BaseComponent {\n constructor(element, config) {\n if (typeof Popper === 'undefined') {\n throw new TypeError('Bootstrap\\'s tooltips require Popper (https://popper.js.org)');\n }\n super(element, config);\n\n // Private\n this._isEnabled = true;\n this._timeout = 0;\n this._isHovered = null;\n this._activeTrigger = {};\n this._popper = null;\n this._templateFactory = null;\n this._newContent = null;\n\n // Protected\n this.tip = null;\n this._setListeners();\n if (!this._config.selector) {\n this._fixTitle();\n }\n }\n\n // Getters\n static get Default() {\n return Default$3;\n }\n static get DefaultType() {\n return DefaultType$3;\n }\n static get NAME() {\n return NAME$4;\n }\n\n // Public\n enable() {\n this._isEnabled = true;\n }\n disable() {\n this._isEnabled = false;\n }\n toggleEnabled() {\n this._isEnabled = !this._isEnabled;\n }\n toggle() {\n if (!this._isEnabled) {\n return;\n }\n this._activeTrigger.click = !this._activeTrigger.click;\n if (this._isShown()) {\n this._leave();\n return;\n }\n this._enter();\n }\n dispose() {\n clearTimeout(this._timeout);\n EventHandler.off(this._element.closest(SELECTOR_MODAL), EVENT_MODAL_HIDE, this._hideModalHandler);\n if (this._element.getAttribute('data-bs-original-title')) {\n this._element.setAttribute('title', this._element.getAttribute('data-bs-original-title'));\n }\n this._disposePopper();\n super.dispose();\n }\n show() {\n if (this._element.style.display === 'none') {\n throw new Error('Please use show on visible elements');\n }\n if (!(this._isWithContent() && this._isEnabled)) {\n return;\n }\n const showEvent = EventHandler.trigger(this._element, this.constructor.eventName(EVENT_SHOW$2));\n const shadowRoot = findShadowRoot(this._element);\n const isInTheDom = (shadowRoot || this._element.ownerDocument.documentElement).contains(this._element);\n if (showEvent.defaultPrevented || !isInTheDom) {\n return;\n }\n\n // TODO: v6 remove this or make it optional\n this._disposePopper();\n const tip = this._getTipElement();\n this._element.setAttribute('aria-describedby', tip.getAttribute('id'));\n const {\n container\n } = this._config;\n if (!this._element.ownerDocument.documentElement.contains(this.tip)) {\n container.append(tip);\n EventHandler.trigger(this._element, this.constructor.eventName(EVENT_INSERTED));\n }\n this._popper = this._createPopper(tip);\n tip.classList.add(CLASS_NAME_SHOW$2);\n\n // If this is a touch-enabled device we add extra\n // empty mouseover listeners to the body's immediate children;\n // only needed because of broken event delegation on iOS\n // https://www.quirksmode.org/blog/archives/2014/02/mouse_event_bub.html\n if ('ontouchstart' in document.documentElement) {\n for (const element of [].concat(...document.body.children)) {\n EventHandler.on(element, 'mouseover', noop);\n }\n }\n const complete = () => {\n EventHandler.trigger(this._element, 
this.constructor.eventName(EVENT_SHOWN$2));\n if (this._isHovered === false) {\n this._leave();\n }\n this._isHovered = false;\n };\n this._queueCallback(complete, this.tip, this._isAnimated());\n }\n hide() {\n if (!this._isShown()) {\n return;\n }\n const hideEvent = EventHandler.trigger(this._element, this.constructor.eventName(EVENT_HIDE$2));\n if (hideEvent.defaultPrevented) {\n return;\n }\n const tip = this._getTipElement();\n tip.classList.remove(CLASS_NAME_SHOW$2);\n\n // If this is a touch-enabled device we remove the extra\n // empty mouseover listeners we added for iOS support\n if ('ontouchstart' in document.documentElement) {\n for (const element of [].concat(...document.body.children)) {\n EventHandler.off(element, 'mouseover', noop);\n }\n }\n this._activeTrigger[TRIGGER_CLICK] = false;\n this._activeTrigger[TRIGGER_FOCUS] = false;\n this._activeTrigger[TRIGGER_HOVER] = false;\n this._isHovered = null; // it is a trick to support manual triggering\n\n const complete = () => {\n if (this._isWithActiveTrigger()) {\n return;\n }\n if (!this._isHovered) {\n this._disposePopper();\n }\n this._element.removeAttribute('aria-describedby');\n EventHandler.trigger(this._element, this.constructor.eventName(EVENT_HIDDEN$2));\n };\n this._queueCallback(complete, this.tip, this._isAnimated());\n }\n update() {\n if (this._popper) {\n this._popper.update();\n }\n }\n\n // Protected\n _isWithContent() {\n return Boolean(this._getTitle());\n }\n _getTipElement() {\n if (!this.tip) {\n this.tip = this._createTipElement(this._newContent || this._getContentForTemplate());\n }\n return this.tip;\n }\n _createTipElement(content) {\n const tip = this._getTemplateFactory(content).toHtml();\n\n // TODO: remove this check in v6\n if (!tip) {\n return null;\n }\n tip.classList.remove(CLASS_NAME_FADE$2, CLASS_NAME_SHOW$2);\n // TODO: v6 the following can be achieved with CSS only\n tip.classList.add(`bs-${this.constructor.NAME}-auto`);\n const tipId = getUID(this.constructor.NAME).toString();\n tip.setAttribute('id', tipId);\n if (this._isAnimated()) {\n tip.classList.add(CLASS_NAME_FADE$2);\n }\n return tip;\n }\n setContent(content) {\n this._newContent = content;\n if (this._isShown()) {\n this._disposePopper();\n this.show();\n }\n }\n _getTemplateFactory(content) {\n if (this._templateFactory) {\n this._templateFactory.changeContent(content);\n } else {\n this._templateFactory = new TemplateFactory({\n ...this._config,\n // the `content` var has to be after `this._config`\n // to override config.content in case of popover\n content,\n extraClass: this._resolvePossibleFunction(this._config.customClass)\n });\n }\n return this._templateFactory;\n }\n _getContentForTemplate() {\n return {\n [SELECTOR_TOOLTIP_INNER]: this._getTitle()\n };\n }\n _getTitle() {\n return this._resolvePossibleFunction(this._config.title) || this._element.getAttribute('data-bs-original-title');\n }\n\n // Private\n _initializeOnDelegatedTarget(event) {\n return this.constructor.getOrCreateInstance(event.delegateTarget, this._getDelegateConfig());\n }\n _isAnimated() {\n return this._config.animation || this.tip && this.tip.classList.contains(CLASS_NAME_FADE$2);\n }\n _isShown() {\n return this.tip && this.tip.classList.contains(CLASS_NAME_SHOW$2);\n }\n _createPopper(tip) {\n const placement = execute(this._config.placement, [this, tip, this._element]);\n const attachment = AttachmentMap[placement.toUpperCase()];\n return Popper.createPopper(this._element, tip, this._getPopperConfig(attachment));\n }\n _getOffset() {\n 
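// Note (illustrative commentary, not upstream code): this helper normalizes the 'offset' option for Popper's offset modifier.\n // A string such as '10,20' is split and parsed to [10, 20]; a function is wrapped so Popper can call it with its computed data\n // plus the trigger element; an array (the default is [0, 6]) passes through unchanged.\n 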
const {\n offset\n } = this._config;\n if (typeof offset === 'string') {\n return offset.split(',').map(value => Number.parseInt(value, 10));\n }\n if (typeof offset === 'function') {\n return popperData => offset(popperData, this._element);\n }\n return offset;\n }\n _resolvePossibleFunction(arg) {\n return execute(arg, [this._element]);\n }\n _getPopperConfig(attachment) {\n const defaultBsPopperConfig = {\n placement: attachment,\n modifiers: [{\n name: 'flip',\n options: {\n fallbackPlacements: this._config.fallbackPlacements\n }\n }, {\n name: 'offset',\n options: {\n offset: this._getOffset()\n }\n }, {\n name: 'preventOverflow',\n options: {\n boundary: this._config.boundary\n }\n }, {\n name: 'arrow',\n options: {\n element: `.${this.constructor.NAME}-arrow`\n }\n }, {\n name: 'preSetPlacement',\n enabled: true,\n phase: 'beforeMain',\n fn: data => {\n // Pre-set Popper's placement attribute in order to read the arrow sizes properly.\n // Otherwise, Popper mixes up the width and height dimensions since the initial arrow style is for top placement\n this._getTipElement().setAttribute('data-popper-placement', data.state.placement);\n }\n }]\n };\n return {\n ...defaultBsPopperConfig,\n ...execute(this._config.popperConfig, [defaultBsPopperConfig])\n };\n }\n _setListeners() {\n const triggers = this._config.trigger.split(' ');\n for (const trigger of triggers) {\n if (trigger === 'click') {\n EventHandler.on(this._element, this.constructor.eventName(EVENT_CLICK$1), this._config.selector, event => {\n const context = this._initializeOnDelegatedTarget(event);\n context.toggle();\n });\n } else if (trigger !== TRIGGER_MANUAL) {\n const eventIn = trigger === TRIGGER_HOVER ? this.constructor.eventName(EVENT_MOUSEENTER) : this.constructor.eventName(EVENT_FOCUSIN$1);\n const eventOut = trigger === TRIGGER_HOVER ? this.constructor.eventName(EVENT_MOUSELEAVE) : this.constructor.eventName(EVENT_FOCUSOUT$1);\n EventHandler.on(this._element, eventIn, this._config.selector, event => {\n const context = this._initializeOnDelegatedTarget(event);\n context._activeTrigger[event.type === 'focusin' ? TRIGGER_FOCUS : TRIGGER_HOVER] = true;\n context._enter();\n });\n EventHandler.on(this._element, eventOut, this._config.selector, event => {\n const context = this._initializeOnDelegatedTarget(event);\n context._activeTrigger[event.type === 'focusout' ? TRIGGER_FOCUS : TRIGGER_HOVER] = context._element.contains(event.relatedTarget);\n context._leave();\n });\n }\n }\n this._hideModalHandler = () => {\n if (this._element) {\n this.hide();\n }\n };\n EventHandler.on(this._element.closest(SELECTOR_MODAL), EVENT_MODAL_HIDE, this._hideModalHandler);\n }\n _fixTitle() {\n const title = this._element.getAttribute('title');\n if (!title) {\n return;\n }\n if (!this._element.getAttribute('aria-label') && !this._element.textContent.trim()) {\n this._element.setAttribute('aria-label', title);\n }\n this._element.setAttribute('data-bs-original-title', title); // DO NOT USE IT. 
Is only for backwards compatibility\n this._element.removeAttribute('title');\n }\n _enter() {\n if (this._isShown() || this._isHovered) {\n this._isHovered = true;\n return;\n }\n this._isHovered = true;\n this._setTimeout(() => {\n if (this._isHovered) {\n this.show();\n }\n }, this._config.delay.show);\n }\n _leave() {\n if (this._isWithActiveTrigger()) {\n return;\n }\n this._isHovered = false;\n this._setTimeout(() => {\n if (!this._isHovered) {\n this.hide();\n }\n }, this._config.delay.hide);\n }\n _setTimeout(handler, timeout) {\n clearTimeout(this._timeout);\n this._timeout = setTimeout(handler, timeout);\n }\n _isWithActiveTrigger() {\n return Object.values(this._activeTrigger).includes(true);\n }\n _getConfig(config) {\n const dataAttributes = Manipulator.getDataAttributes(this._element);\n for (const dataAttribute of Object.keys(dataAttributes)) {\n if (DISALLOWED_ATTRIBUTES.has(dataAttribute)) {\n delete dataAttributes[dataAttribute];\n }\n }\n config = {\n ...dataAttributes,\n ...(typeof config === 'object' && config ? config : {})\n };\n config = this._mergeConfigObj(config);\n config = this._configAfterMerge(config);\n this._typeCheckConfig(config);\n return config;\n }\n _configAfterMerge(config) {\n config.container = config.container === false ? document.body : getElement(config.container);\n if (typeof config.delay === 'number') {\n config.delay = {\n show: config.delay,\n hide: config.delay\n };\n }\n if (typeof config.title === 'number') {\n config.title = config.title.toString();\n }\n if (typeof config.content === 'number') {\n config.content = config.content.toString();\n }\n return config;\n }\n _getDelegateConfig() {\n const config = {};\n for (const [key, value] of Object.entries(this._config)) {\n if (this.constructor.Default[key] !== value) {\n config[key] = value;\n }\n }\n config.selector = false;\n config.trigger = 'manual';\n\n // In the future can be replaced with:\n // const keysWithDifferentValues = Object.entries(this._config).filter(entry => this.constructor.Default[entry[0]] !== this._config[entry[0]])\n // `Object.fromEntries(keysWithDifferentValues)`\n return config;\n }\n _disposePopper() {\n if (this._popper) {\n this._popper.destroy();\n this._popper = null;\n }\n if (this.tip) {\n this.tip.remove();\n this.tip = null;\n }\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Tooltip.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n });\n }\n}\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Tooltip);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap popover.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$3 = 'popover';\nconst SELECTOR_TITLE = '.popover-header';\nconst SELECTOR_CONTENT = '.popover-body';\nconst Default$2 = {\n ...Tooltip.Default,\n content: '',\n offset: [0, 8],\n placement: 'right',\n template: '
<div class="popover" role="tooltip">' + '<div class="popover-arrow"></div>' + '<h3 class="popover-header"></h3>' + '<div class="popover-body"></div>' + '</div>
',\n trigger: 'click'\n};\nconst DefaultType$2 = {\n ...Tooltip.DefaultType,\n content: '(null|string|element|function)'\n};\n\n/**\n * Class definition\n */\n\nclass Popover extends Tooltip {\n // Getters\n static get Default() {\n return Default$2;\n }\n static get DefaultType() {\n return DefaultType$2;\n }\n static get NAME() {\n return NAME$3;\n }\n\n // Overrides\n _isWithContent() {\n return this._getTitle() || this._getContent();\n }\n\n // Private\n _getContentForTemplate() {\n return {\n [SELECTOR_TITLE]: this._getTitle(),\n [SELECTOR_CONTENT]: this._getContent()\n };\n }\n _getContent() {\n return this._resolvePossibleFunction(this._config.content);\n }\n\n // Static\n static jQueryInterface(config) {\n return this.each(function () {\n const data = Popover.getOrCreateInstance(this, config);\n if (typeof config !== 'string') {\n return;\n }\n if (typeof data[config] === 'undefined') {\n throw new TypeError(`No method named \"${config}\"`);\n }\n data[config]();\n });\n }\n}\n\n/**\n * jQuery\n */\n\ndefineJQueryPlugin(Popover);\n\n/**\n * --------------------------------------------------------------------------\n * Bootstrap scrollspy.js\n * Licensed under MIT (https://github.com/twbs/bootstrap/blob/main/LICENSE)\n * --------------------------------------------------------------------------\n */\n\n\n/**\n * Constants\n */\n\nconst NAME$2 = 'scrollspy';\nconst DATA_KEY$2 = 'bs.scrollspy';\nconst EVENT_KEY$2 = `.${DATA_KEY$2}`;\nconst DATA_API_KEY = '.data-api';\nconst EVENT_ACTIVATE = `activate${EVENT_KEY$2}`;\nconst EVENT_CLICK = `click${EVENT_KEY$2}`;\nconst EVENT_LOAD_DATA_API$1 = `load${EVENT_KEY$2}${DATA_API_KEY}`;\nconst CLASS_NAME_DROPDOWN_ITEM = 'dropdown-item';\nconst CLASS_NAME_ACTIVE$1 = 'active';\nconst SELECTOR_DATA_SPY = '[data-bs-spy=\"scroll\"]';\nconst SELECTOR_TARGET_LINKS = '[href]';\nconst SELECTOR_NAV_LIST_GROUP = '.nav, .list-group';\nconst SELECTOR_NAV_LINKS = '.nav-link';\nconst SELECTOR_NAV_ITEMS = '.nav-item';\nconst SELECTOR_LIST_ITEMS = '.list-group-item';\nconst SELECTOR_LINK_ITEMS = `${SELECTOR_NAV_LINKS}, ${SELECTOR_NAV_ITEMS} > ${SELECTOR_NAV_LINKS}, ${SELECTOR_LIST_ITEMS}`;\nconst SELECTOR_DROPDOWN = '.dropdown';\nconst SELECTOR_DROPDOWN_TOGGLE$1 = '.dropdown-toggle';\nconst Default$1 = {\n offset: null,\n // TODO: v6 @deprecated, keep it for backwards compatibility reasons\n rootMargin: '0px 0px -25%',\n smoothScroll: false,\n target: null,\n threshold: [0.1, 0.5, 1]\n};\nconst DefaultType$1 = {\n offset: '(number|null)',\n // TODO v6 @deprecated, keep it for backwards compatibility reasons\n rootMargin: 'string',\n smoothScroll: 'boolean',\n target: 'element',\n threshold: 'array'\n};\n\n/**\n * Class definition\n */\n\nclass ScrollSpy extends BaseComponent {\n constructor(element, config) {\n super(element, config);\n\n // this._element is the observablesContainer and config.target the menu links wrapper\n this._targetLinks = new Map();\n this._observableSections = new Map();\n this._rootElement = getComputedStyle(this._element).overflowY === 'visible' ? 
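// Note (illustrative commentary, not upstream code): if the spied container's computed overflow-y is 'visible' it cannot scroll\n // on its own, so the IntersectionObserver root stays null (the viewport); otherwise the container itself becomes the observer root.\n 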
null : this._element;\n this._activeTarget = null;\n this._observer = null;\n this._previousScrollData = {\n visibleEntryTop: 0,\n parentScrollTop: 0\n };\n this.refresh(); // initialize\n }\n\n // Getters\n static get Default() {\n return Default$1;\n }\n static get DefaultType() {\n return DefaultType$1;\n }\n static get NAME() {\n return NAME$2;\n }\n\n // Public\n refresh() {\n this._initializeTargetsAndObservables();\n this._maybeEnableSmoothScroll();\n if (this._observer) {\n this._observer.disconnect();\n } else {\n this._observer = this._getNewObserver();\n }\n for (const section of this._observableSections.values()) {\n this._observer.observe(section);\n }\n }\n dispose() {\n this._observer.disconnect();\n super.dispose();\n }\n\n // Private\n _configAfterMerge(config) {\n // TODO: on v6 target should be given explicitly & remove the {target: 'ss-target'} case\n config.target = getElement(config.target) || document.body;\n\n // TODO: v6 Only for backwards compatibility reasons. Use rootMargin only\n config.rootMargin = config.offset ? `${config.offset}px 0px -30%` : config.rootMargin;\n if (typeof config.threshold === 'string') {\n config.threshold = config.threshold.split(',').map(value => Number.parseFloat(value));\n }\n return config;\n }\n _maybeEnableSmoothScroll() {\n if (!this._config.smoothScroll) {\n return;\n }\n\n // unregister any previous listeners\n EventHandler.off(this._config.target, EVENT_CLICK);\n EventHandler.on(this._config.target, EVENT_CLICK, SELECTOR_TARGET_LINKS, event => {\n const observableSection = this._observableSections.get(event.target.hash);\n if (observableSection) {\n event.preventDefault();\n const root = this._rootElement || window;\n const height = observableSection.offsetTop - this._element.offsetTop;\n if (root.scrollTo) {\n root.scrollTo({\n top: height,\n behavior: 'smooth'\n });\n return;\n }\n\n // Chrome 60 doesn't support `scrollTo`\n root.scrollTop = height;\n }\n });\n }\n _getNewObserver() {\n const options = {\n root: this._rootElement,\n threshold: this._config.threshold,\n rootMargin: this._config.rootMargin\n };\n return new IntersectionObserver(entries => this._observerCallback(entries), options);\n }\n\n // The logic of selection\n _observerCallback(entries) {\n const targetElement = entry => this._targetLinks.get(`#${entry.target.id}`);\n const activate = entry => {\n this._previousScrollData.visibleEntryTop = entry.target.offsetTop;\n this._process(targetElement(entry));\n };\n const parentScrollTop = (this._rootElement || document.documentElement).scrollTop;\n const userScrollsDown = parentScrollTop >= this._previousScrollData.parentScrollTop;\n this._previousScrollData.parentScrollTop = parentScrollTop;\n for (const entry of entries) {\n if (!entry.isIntersecting) {\n this._activeTarget = null;\n this._clearActiveClass(targetElement(entry));\n continue;\n }\n const entryIsLowerThanPrevious = entry.target.offsetTop >= this._previousScrollData.visibleEntryTop;\n // if we are scrolling down, pick the bigger offsetTop\n if (userScrollsDown && entryIsLowerThanPrevious) {\n activate(entry);\n // if parent isn't scrolled, let's keep the first visible item, breaking the iteration\n if (!parentScrollTop) {\n return;\n }\n continue;\n }\n\n // if we are scrolling up, pick the smallest offsetTop\n if (!userScrollsDown && !entryIsLowerThanPrevious) {\n activate(entry);\n }\n }\n }\n _initializeTargetsAndObservables() {\n this._targetLinks = new Map();\n this._observableSections = new Map();\n const targetLinks = 
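// Note (illustrative commentary, not upstream code): gather every '[href]' anchor inside the configured target (the nav or\n // list-group wrapper); the loop below skips anchors without a hash or that are disabled, and keeps only those whose\n // observable section is visible.\n 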
SelectorEngine.find(SELECTOR_TARGET_LINKS, this._config.target);\n for (const anchor of targetLinks) {\n // ensure that the anchor has an id and is not disabled\n if (!anchor.hash || isDisabled(anchor)) {\n continue;\n }\n const observableSection = SelectorEngine.findOne(decodeURI(anchor.hash), this._element);\n\n // ensure that the observableSection exists & is visible\n if (isVisible(observableSection)) {\n this._targetLinks.set(decodeURI(anchor.hash), anchor);\n this._observableSections.set(anchor.hash, observableSection);\n }\n }\n }\n _process(target) {\n if (this._activeTarget === target) {\n return;\n }\n this._clearActiveClass(this._config.target);\n this._activeTarget = target;\n target.classList.add(CLASS_NAME_ACTIVE$1);\n this._activateParents(target);\n EventHandler.trigger(this._element, EVENT_ACTIVATE, {\n relatedTarget: target\n });\n }\n _activateParents(target) {\n // Activate dropdown parents\n if (target.classList.contains(CLASS_NAME_DROPDOWN_ITEM)) {\n SelectorEngine.findOne(SELECTOR_DROPDOWN_TOGGLE$1, target.closest(SELECTOR_DROPDOWN)).classList.add(CLASS_NAME_ACTIVE$1);\n return;\n }\n for (const listGroup of SelectorEngine.parents(target, SELECTOR_NAV_LIST_GROUP)) {\n // Set triggered links parents as active\n // With both